applied-ai-018 committed
Commit be771ee · verified · 1 Parent(s): fe6b9da

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/datasets/commands/__init__.py +13 -0
  2. env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/datasets/commands/convert.py +195 -0
  4. env-llmeval/lib/python3.10/site-packages/datasets/commands/datasets_cli.py +43 -0
  5. env-llmeval/lib/python3.10/site-packages/datasets/commands/dummy_data.py +468 -0
  6. env-llmeval/lib/python3.10/site-packages/datasets/commands/env.py +41 -0
  7. env-llmeval/lib/python3.10/site-packages/datasets/commands/run_beam.py +165 -0
  8. env-llmeval/lib/python3.10/site-packages/datasets/commands/test.py +201 -0
  9. env-llmeval/lib/python3.10/site-packages/datasets/download/__init__.py +10 -0
  10. env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/datasets/download/download_config.py +102 -0
  16. env-llmeval/lib/python3.10/site-packages/datasets/download/download_manager.py +584 -0
  17. env-llmeval/lib/python3.10/site-packages/datasets/download/mock_download_manager.py +244 -0
  18. env-llmeval/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py +1133 -0
  19. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__init__.py +131 -0
  20. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/datasets/formatting/formatting.py +649 -0
  27. env-llmeval/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py +160 -0
  28. env-llmeval/lib/python3.10/site-packages/datasets/formatting/np_formatter.py +106 -0
  29. env-llmeval/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py +115 -0
  30. env-llmeval/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py +111 -0
  31. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py +0 -0
  33. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py +0 -0
  36. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py +0 -0
  39. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py +0 -0
  40. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py +31 -0
  43. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py +0 -0
  44. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py +349 -0
  47. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py +0 -0
  48. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py +118 -0
env-llmeval/lib/python3.10/site-packages/datasets/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseDatasetsCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
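Illustrative note (not part of the commit): every CLI command in this package follows the same two-hook contract, a static register_subcommand that wires argparse and an instance run method. A minimal hypothetical subclass could look like the sketch below; the command name and message are made up for illustration.

from argparse import ArgumentParser
from datasets.commands import BaseDatasetsCLICommand

class HelloCommand(BaseDatasetsCLICommand):  # hypothetical example
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from datasets-cli")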
env-llmeval/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc ADDED
Binary file (16.5 kB)
 
env-llmeval/lib/python3.10/site-packages/datasets/commands/convert.py ADDED
@@ -0,0 +1,195 @@
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow Datasets script into a HuggingFace Datasets script.

    Returns: ConvertCommand
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
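Illustrative note (not part of the commit): the converter can also be driven without the CLI wrapper by instantiating the command class directly; the paths below are placeholders.

from datasets.commands.convert import ConvertCommand

# Convert a local TFDS script (or a folder of scripts) into HuggingFace Datasets scripts.
ConvertCommand("path/to/tfds_script.py", "path/to/output_datasets_dir").run()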
env-llmeval/lib/python3.10/site-packages/datasets/commands/datasets_cli.py ADDED
@@ -0,0 +1,43 @@
#!/usr/bin/env python
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
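Illustrative note (not part of the commit): parse_unknown_args simply pairs leftover `--flag value` tokens, which is how extra builder kwargs reach the command factories. A small sketch with placeholder flags:

from datasets.commands.datasets_cli import parse_unknown_args

extra = parse_unknown_args(["--num_proc", "4", "--data_dir", "./data"])  # placeholder flags
assert extra == {"num_proc": "4", "data_dir": "./data"}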
env-llmeval/lib/python3.10/site-packages/datasets/commands/dummy_data.py ADDED
@@ -0,0 +1,468 @@
import fnmatch
import json
import os
import shutil
import tempfile
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional

from datasets import config
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.download.mock_download_manager import MockDownloadManager
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.deprecation_utils import deprecated
from datasets.utils.logging import get_logger, set_verbosity_warning
from datasets.utils.py_utils import map_nested


logger = get_logger(__name__)

DEFAULT_ENCODING = "utf-8"


def dummy_data_command_factory(args):
    return DummyDataCommand(
        args.path_to_dataset,
        args.auto_generate,
        args.n_lines,
        args.json_field,
        args.xml_tag,
        args.match_text_files,
        args.keep_uncompressed,
        args.cache_dir,
        args.encoding,
    )


class DummyDataGeneratorDownloadManager(DownloadManager):
    def __init__(self, mock_download_manager, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.mock_download_manager = mock_download_manager
        self.downloaded_dummy_paths = []
        self.expected_dummy_paths = []

    def download(self, url_or_urls):
        output = super().download(url_or_urls)
        dummy_output = self.mock_download_manager.download(url_or_urls)
        map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
        map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
        return output

    def download_and_extract(self, url_or_urls):
        output = super().extract(super().download(url_or_urls))
        dummy_output = self.mock_download_manager.download(url_or_urls)
        map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
        map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
        return output

    def auto_generate_dummy_data_folder(
        self,
        n_lines: int = 5,
        json_field: Optional[str] = None,
        xml_tag: Optional[str] = None,
        match_text_files: Optional[str] = None,
        encoding: Optional[str] = None,
    ) -> bool:
        os.makedirs(
            os.path.join(
                self.mock_download_manager.datasets_scripts_dir,
                self.mock_download_manager.dataset_name,
                self.mock_download_manager.dummy_data_folder,
                "dummy_data",
            ),
            exist_ok=True,
        )
        total = 0
        self.mock_download_manager.load_existing_dummy_data = False
        for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
            dst_path = os.path.join(
                self.mock_download_manager.datasets_scripts_dir,
                self.mock_download_manager.dataset_name,
                self.mock_download_manager.dummy_data_folder,
                relative_dst_path,
            )
            total += self._create_dummy_data(
                src_path,
                dst_path,
                n_lines=n_lines,
                json_field=json_field,
                xml_tag=xml_tag,
                match_text_files=match_text_files,
                encoding=encoding,
            )
        if total == 0:
            logger.error(
                "Dummy data generation failed: no dummy files were created. "
                "Make sure the data files format is supported by the auto-generation."
            )
        return total > 0

    def _create_dummy_data(
        self,
        src_path: str,
        dst_path: str,
        n_lines: int,
        json_field: Optional[str] = None,
        xml_tag: Optional[str] = None,
        match_text_files: Optional[str] = None,
        encoding: Optional[str] = None,
    ) -> int:
        encoding = encoding or DEFAULT_ENCODING
        if os.path.isfile(src_path):
            logger.debug(f"Trying to generate dummy data file {dst_path}")
            dst_path_extensions = Path(dst_path).suffixes
            line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
            is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
            if match_text_files is not None:
                file_name = os.path.basename(dst_path)
                for pattern in match_text_files.split(","):
                    is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
            # Line by line text file (txt, csv etc.)
            if is_line_by_line_text_file:
                Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
                with open(src_path, encoding=encoding) as src_file:
                    with open(dst_path, "w", encoding=encoding) as dst_file:
                        first_lines = []
                        for i, line in enumerate(src_file):
                            if i >= n_lines:
                                break
                            first_lines.append(line)
                        dst_file.write("".join(first_lines).strip())
                return 1
            # json file
            elif ".json" in dst_path_extensions:
                with open(src_path, encoding=encoding) as src_file:
                    json_data = json.load(src_file)
                    if json_field is not None:
                        json_data = json_data[json_field]
                    if isinstance(json_data, dict):
                        if not all(isinstance(v, list) for v in json_data.values()):
                            raise ValueError(
                                f"Couldn't parse columns {list(json_data.keys())}. "
                                "Maybe specify which json field must be used "
                                "to read the data with --json_field <my_field>."
                            )
                        first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
                    else:
                        first_json_data = json_data[:n_lines]
                        if json_field is not None:
                            first_json_data = {json_field: first_json_data}
                    Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
                    with open(dst_path, "w", encoding=encoding) as dst_file:
                        json.dump(first_json_data, dst_file)
                return 1
            # xml file
            elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
                if xml_tag is None:
                    logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
                else:
                    self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
                return 1
            logger.warning(
                f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
            )
            return 0
        # directory, iterate through all files
        elif os.path.isdir(src_path):
            total = 0
            for path, _, files in os.walk(src_path):
                for name in files:
                    if not name.startswith("."):  # ignore files like .DS_Store etc.
                        src_file_path = os.path.join(path, name)
                        dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
                        total += self._create_dummy_data(
                            src_file_path,
                            dst_file_path,
                            n_lines=n_lines,
                            json_field=json_field,
                            xml_tag=xml_tag,
                            match_text_files=match_text_files,
                            encoding=encoding,
                        )
            return total

    @staticmethod
    def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
        Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
        with open(src_path, encoding=encoding) as src_file:
            n_line = 0
            parents = []
            for event, elem in ET.iterparse(src_file, events=("start", "end")):
                if event == "start":
                    parents.append(elem)
                else:
                    _ = parents.pop()
                    if elem.tag == xml_tag:
                        if n_line < n_lines:
                            n_line += 1
                        else:
                            if parents:
                                parents[-1].remove(elem)
            ET.ElementTree(element=elem).write(dst_path, encoding=encoding)

    def compress_autogenerated_dummy_data(self, path_to_dataset):
        root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
        base_name = os.path.join(root_dir, "dummy_data")
        base_dir = "dummy_data"
        logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
        shutil.make_archive(base_name, "zip", root_dir, base_dir)
        shutil.rmtree(base_name)


@deprecated(
    "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
)
class DummyDataCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
        test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
        test_parser.add_argument(
            "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
        )
        test_parser.add_argument(
            "--json_field",
            type=str,
            default=None,
            help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
        )
        test_parser.add_argument(
            "--xml_tag",
            type=str,
            default=None,
            help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
        )
        test_parser.add_argument(
            "--match_text_files",
            type=str,
            default=None,
            help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
        )
        test_parser.add_argument(
            "--keep_uncompressed",
            action="store_true",
            help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging, to make manual adjustments before compressing.",
        )
        test_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory to download and cache files when auto-generating dummy data",
        )
        test_parser.add_argument(
            "--encoding",
            type=str,
            default=None,
            help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
        )
        test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
        test_parser.set_defaults(func=dummy_data_command_factory)

    def __init__(
        self,
        path_to_dataset: str,
        auto_generate: bool,
        n_lines: int,
        json_field: Optional[str],
        xml_tag: Optional[str],
        match_text_files: Optional[str],
        keep_uncompressed: bool,
        cache_dir: Optional[str],
        encoding: Optional[str],
    ):
        self._path_to_dataset = path_to_dataset
        if os.path.isdir(path_to_dataset):
            self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
        else:
            self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
        cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
        self._auto_generate = auto_generate
        self._n_lines = n_lines
        self._json_field = json_field
        self._xml_tag = xml_tag
        self._match_text_files = match_text_files
        self._keep_uncompressed = keep_uncompressed
        self._cache_dir = cache_dir
        self._encoding = encoding

    def run(self):
        set_verbosity_warning()
        dataset_module = dataset_module_factory(self._path_to_dataset)
        builder_cls = import_main_class(dataset_module.module_path)

        # use `None` as config if no configs
        builder_configs = builder_cls.BUILDER_CONFIGS or [None]
        auto_generate_results = []
        with tempfile.TemporaryDirectory() as tmp_dir:
            for builder_config in builder_configs:
                config_name = builder_config.name if builder_config else None
                dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
                version = builder_config.version if builder_config else dataset_builder.config.version
                mock_dl_manager = MockDownloadManager(
                    dataset_name=self._dataset_name,
                    config=builder_config,
                    version=version,
                    use_local_dummy_data=True,
                    load_existing_dummy_data=False,
                )

                if self._auto_generate:
                    auto_generate_results.append(
                        self._autogenerate_dummy_data(
                            dataset_builder=dataset_builder,
                            mock_dl_manager=mock_dl_manager,
                            keep_uncompressed=self._keep_uncompressed,
                        )
                    )
                else:
                    self._print_dummy_data_instructions(
                        dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
                    )
            if self._auto_generate and not self._keep_uncompressed:
                if all(auto_generate_results):
                    print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
                else:
                    print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")

    def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
        dl_cache_dir = (
            os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
            if self._cache_dir
            else config.DOWNLOADED_DATASETS_PATH
        )
        download_config = DownloadConfig(cache_dir=dl_cache_dir)
        dl_manager = DummyDataGeneratorDownloadManager(
            dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
        )
        dataset_builder._split_generators(dl_manager)
        mock_dl_manager.load_existing_dummy_data = False  # don't use real dummy data
        dl_manager.auto_generate_dummy_data_folder(
            n_lines=self._n_lines,
            json_field=self._json_field,
            xml_tag=self._xml_tag,
            match_text_files=self._match_text_files,
            encoding=self._encoding,
        )
        if not keep_uncompressed:
            path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
            dl_manager.compress_autogenerated_dummy_data(path_do_dataset)
            # now test that the dummy_data.zip file actually works
            mock_dl_manager.load_existing_dummy_data = True  # use real dummy data
            n_examples_per_split = {}
            os.makedirs(dataset_builder._cache_dir, exist_ok=True)
            try:
                split_generators = dataset_builder._split_generators(mock_dl_manager)
                for split_generator in split_generators:
                    dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
                    n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
            except OSError as e:
                logger.error(
                    f"Failed to load dummy data for config '{dataset_builder.config.name}'.\nOriginal error:\n"
                    + str(e)
                )
                return False
            else:
                if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
                    logger.warning(
                        f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}'."
                    )
                    return True
                else:
                    empty_splits = [
                        split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
                    ]
                    logger.warning(
                        f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}'."
                    )
                    return False
        else:
            generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
            logger.info(
                f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
                "Please compress this directory into a zip file to use it for dummy data tests."
            )

    def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
        dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
        logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
        os.makedirs(dummy_data_folder, exist_ok=True)

        try:
            generator_splits = dataset_builder._split_generators(mock_dl_manager)
        except FileNotFoundError as e:
            print(
                f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider opening files only in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
            )

        files_to_create = set()
        split_names = []
        dummy_file_name = mock_dl_manager.dummy_file_name

        for split in generator_splits:
            logger.info(f"Collecting dummy data file paths to create for {split.name}")
            split_names.append(split.name)
            gen_kwargs = split.gen_kwargs
            generator = dataset_builder._generate_examples(**gen_kwargs)

            try:
                dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
                config_string = (
                    f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
                )
                dummy_data_guidance_print += (
                    "- In order to create the dummy data for "
                    + config_string
                    + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
                )

                # trigger generate function
                for key, record in generator:
                    pass

                dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"

            except FileNotFoundError as e:
                files_to_create.add(e.filename)

        split_names = ", ".join(split_names)
        if len(files_to_create) > 0:
            # no glob.glob(...) in `_generate_examples(...)`
            if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
                dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
                files_string = dummy_file_name
            else:
                files_string = ", ".join(files_to_create)
                dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"

                dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"

            dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"

            if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
                dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"

                dummy_data_guidance_print += (
                    f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
                )

                dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
            else:
                dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"

                dummy_data_guidance_print += (
                    f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
                )

                dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"

        dummy_data_guidance_print += (
            f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
        )

        dummy_data_guidance_print += 83 * "=" + "\n"

        print(dummy_data_guidance_print)
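Illustrative note (not part of the commit): _create_dummy_data chooses between line-by-line, JSON and XML truncation based on the destination file's suffixes. A small sketch of that check, with a placeholder filename:

from pathlib import Path

suffixes = Path("train.jsonl.gz").suffixes  # [".jsonl", ".gz"]
line_by_line = any(ext in suffixes for ext in [".txt", ".csv", ".jsonl", ".tsv"])
assert line_by_line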
env-llmeval/lib/python3.10/site-packages/datasets/commands/env.py ADDED
@@ -0,0 +1,41 @@
import platform
from argparse import ArgumentParser

import fsspec
import huggingface_hub
import pandas
import pyarrow

from datasets import __version__ as version
from datasets.commands import BaseDatasetsCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env", help="Print relevant system environment info.")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        info = {
            "`datasets` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "`huggingface_hub` version": huggingface_hub.__version__,
            "PyArrow version": pyarrow.__version__,
            "Pandas version": pandas.__version__,
            "`fsspec` version": fsspec.__version__,
        }

        print("\nCopy-and-paste the text below in your GitHub issue.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
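Illustrative note (not part of the commit): the env command can also be invoked programmatically; run() prints the "- key: value" report shown above and returns the underlying dict.

from datasets.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()
print(sorted(info))  # keys include "`datasets` version", "Platform", "Python version", ...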
env-llmeval/lib/python3.10/site-packages/datasets/commands/run_beam.py ADDED
@@ -0,0 +1,165 @@
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile
from typing import List

from datasets import config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode


def run_beam_command_factory(args, **kwargs):
    return RunBeamCommand(
        args.dataset,
        args.name,
        args.cache_dir,
        args.beam_pipeline_options,
        args.data_dir,
        args.all_configs,
        args.save_info or args.save_infos,
        args.ignore_verifications,
        args.force_redownload,
        **kwargs,
    )


class RunBeamCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
        run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
        run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
        run_beam_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory where the datasets are stored",
        )
        run_beam_parser.add_argument(
            "--beam_pipeline_options",
            type=str,
            default="",
            help="Beam pipeline options, separated by commas. Example: `--beam_pipeline_options=job_name=my-job,project=my-project`",
        )
        run_beam_parser.add_argument(
            "--data_dir",
            type=str,
            default=None,
            help="Can be used to specify a manual directory to get the files from",
        )
        run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
        run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
        run_beam_parser.add_argument(
            "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
        )
        run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
        # aliases
        run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
        run_beam_parser.set_defaults(func=run_beam_command_factory)

    def __init__(
        self,
        dataset: str,
        name: str,
        cache_dir: str,
        beam_pipeline_options: str,
        data_dir: str,
        all_configs: bool,
        save_infos: bool,
        ignore_verifications: bool,
        force_redownload: bool,
        **config_kwargs,
    ):
        self._dataset = dataset
        self._name = name
        self._cache_dir = cache_dir
        self._beam_pipeline_options = beam_pipeline_options
        self._data_dir = data_dir
        self._all_configs = all_configs
        self._save_infos = save_infos
        self._ignore_verifications = ignore_verifications
        self._force_redownload = force_redownload
        self._config_kwargs = config_kwargs

    def run(self):
        import apache_beam as beam

        if self._name is not None and self._all_configs:
            print("Both parameters `name` and `all_configs` can't be used at once.")
            exit(1)
        path, config_name = self._dataset, self._name
        dataset_module = dataset_module_factory(path)
        builder_cls = import_main_class(dataset_module.module_path)
        builders: List[DatasetBuilder] = []
        if self._beam_pipeline_options:
            beam_options = beam.options.pipeline_options.PipelineOptions(
                flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
            )
        else:
            beam_options = None
        if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
            for builder_config in builder_cls.BUILDER_CONFIGS:
                builders.append(
                    builder_cls(
                        config_name=builder_config.name,
                        data_dir=self._data_dir,
                        hash=dataset_module.hash,
                        beam_options=beam_options,
                        cache_dir=self._cache_dir,
                        base_path=dataset_module.builder_kwargs.get("base_path"),
                    )
                )
        else:
            builders.append(
                builder_cls(
                    config_name=config_name,
                    data_dir=self._data_dir,
                    beam_options=beam_options,
                    cache_dir=self._cache_dir,
                    base_path=dataset_module.builder_kwargs.get("base_path"),
                    **self._config_kwargs,
                )
            )

        for builder in builders:
            builder.download_and_prepare(
                download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
                if not self._force_redownload
                else DownloadMode.FORCE_REDOWNLOAD,
                download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
                verification_mode=VerificationMode.NO_CHECKS
                if self._ignore_verifications
                else VerificationMode.ALL_CHECKS,
                try_from_hf_gcs=False,
            )
            if self._save_infos:
                builder._save_infos()

        print("Apache beam run successful.")

        # If save_infos=True, the dataset infos file is created next to the loaded module file.
        # Let's move it to the original directory of the dataset script, to allow the user to
        # upload them on S3 at the same time afterwards.
        if self._save_infos:
            dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)

            name = Path(path).name + ".py"

            combined_path = os.path.join(path, name)
            if os.path.isfile(path):
                dataset_dir = os.path.dirname(path)
            elif os.path.isfile(combined_path):
                dataset_dir = path
            else:  # in case of a remote dataset
                print(f"Dataset Infos file saved at {dataset_infos_path}")
                exit(1)

            # Move datasetinfo back to the user
            user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
            copyfile(dataset_infos_path, user_dataset_infos_path)
            print(f"Dataset Infos file saved at {user_dataset_infos_path}")
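Illustrative note (not part of the commit): the comma-separated --beam_pipeline_options string is turned into PipelineOptions flags exactly as sketched below; the option names are placeholders.

opts = "job_name=my-job,project=my-project"
flags = [f"--{opt.strip()}" for opt in opts.split(",") if opt]
assert flags == ["--job_name=my-job", "--project=my-project"]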
env-llmeval/lib/python3.10/site-packages/datasets/commands/test.py ADDED
@@ -0,0 +1,201 @@
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile, rmtree
from typing import Generator

import datasets.config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import ERROR, get_logger


logger = get_logger(__name__)


def _test_command_factory(args):
    return TestCommand(
        args.dataset,
        args.name,
        args.cache_dir,
        args.data_dir,
        args.all_configs,
        args.save_info or args.save_infos,
        args.ignore_verifications,
        args.force_redownload,
        args.clear_cache,
        args.num_proc,
    )


class TestCommand(BaseDatasetsCLICommand):
    __test__ = False  # to tell pytest it's not a test class

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        test_parser = parser.add_parser("test", help="Test dataset implementation.")
        test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
        test_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory where the datasets are stored.",
        )
        test_parser.add_argument(
            "--data_dir",
            type=str,
            default=None,
            help="Can be used to specify a manual directory to get the files from.",
        )
        test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
        test_parser.add_argument(
            "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
        )
        test_parser.add_argument(
            "--ignore_verifications",
            action="store_true",
            help="Run the test without checksums and splits checks.",
        )
        test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
        test_parser.add_argument(
            "--clear_cache",
            action="store_true",
            help="Remove downloaded files and cached datasets after each config test",
        )
        test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes")
        # aliases
        test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
        test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
        test_parser.set_defaults(func=_test_command_factory)

    def __init__(
        self,
        dataset: str,
        name: str,
        cache_dir: str,
        data_dir: str,
        all_configs: bool,
        save_infos: bool,
        ignore_verifications: bool,
        force_redownload: bool,
        clear_cache: bool,
        num_proc: int,
    ):
        self._dataset = dataset
        self._name = name
        self._cache_dir = cache_dir
        self._data_dir = data_dir
        self._all_configs = all_configs
        self._save_infos = save_infos
        self._ignore_verifications = ignore_verifications
        self._force_redownload = force_redownload
        self._clear_cache = clear_cache
        self._num_proc = num_proc
        if clear_cache and not cache_dir:
            print(
                "When --clear_cache is used, specifying a cache directory is mandatory.\n"
                "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
                "Please provide a --cache_dir that will be used to test the dataset script."
            )
            exit(1)
        if save_infos:
            self._ignore_verifications = True

    def run(self):
        logging.getLogger("filelock").setLevel(ERROR)
        if self._name is not None and self._all_configs:
            print("Both parameters `config` and `all_configs` can't be used at once.")
            exit(1)
        path, config_name = self._dataset, self._name
        module = dataset_module_factory(path)
        builder_cls = import_main_class(module.module_path)
        n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1

        def get_builders() -> Generator[DatasetBuilder, None, None]:
            if self._all_configs and builder_cls.BUILDER_CONFIGS:
                for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
                    if "config_name" in module.builder_kwargs:
                        yield builder_cls(
                            cache_dir=self._cache_dir,
                            data_dir=self._data_dir,
                            **module.builder_kwargs,
                        )
                    else:
                        yield builder_cls(
                            config_name=config.name,
                            cache_dir=self._cache_dir,
                            data_dir=self._data_dir,
                            **module.builder_kwargs,
                        )
            else:
                if "config_name" in module.builder_kwargs:
                    yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
                else:
                    yield builder_cls(
                        config_name=config_name,
                        cache_dir=self._cache_dir,
                        data_dir=self._data_dir,
                        **module.builder_kwargs,
                    )

        for j, builder in enumerate(get_builders()):
            print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
            builder._record_infos = os.path.exists(
                os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
            )  # record checksums only if we need to update a (deprecated) dataset_infos.json
            builder.download_and_prepare(
                download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
                if not self._force_redownload
                else DownloadMode.FORCE_REDOWNLOAD,
                verification_mode=VerificationMode.NO_CHECKS
                if self._ignore_verifications
                else VerificationMode.ALL_CHECKS,
                try_from_hf_gcs=False,
                num_proc=self._num_proc,
            )
            builder.as_dataset()
            if self._save_infos:
                builder._save_infos()

            # If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
            # The dataset_infos are saved in the YAML part of the README.md

            # Let's move it to the original directory of the dataset script, to allow the user to
            # upload them on S3 at the same time afterwards.
            if self._save_infos:
                dataset_readme_path = os.path.join(
                    builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
                )
                name = Path(path).name + ".py"
                combined_path = os.path.join(path, name)
                if os.path.isfile(path):
                    dataset_dir = os.path.dirname(path)
                elif os.path.isfile(combined_path):
                    dataset_dir = path
                elif os.path.isdir(path):  # for local directories containing only data files
                    dataset_dir = path
                else:  # in case of a remote dataset
                    dataset_dir = None
                    print(f"Dataset card saved at {dataset_readme_path}")

                # Move dataset_info back to the user
                if dataset_dir is not None:
                    user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
                    copyfile(dataset_readme_path, user_dataset_readme_path)
                    print(f"Dataset card saved at {user_dataset_readme_path}")

            # If clear_cache=True, the download folder and the dataset builder cache directory are deleted
            if self._clear_cache:
                if os.path.isdir(builder._cache_dir):
                    logger.warning(f"Clearing cache at {builder._cache_dir}")
                    rmtree(builder._cache_dir)
                download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
                if os.path.isdir(download_dir):
                    logger.warning(f"Clearing cache at {download_dir}")
                    rmtree(download_dir)

        print("Test successful.")
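Illustrative note (not part of the commit): the download and verification modes passed to download_and_prepare are resolved from the CLI flags as in this sketch (the flag values are placeholders):

from datasets.download.download_manager import DownloadMode
from datasets.utils.info_utils import VerificationMode

force_redownload, ignore_verifications = False, True
download_mode = DownloadMode.FORCE_REDOWNLOAD if force_redownload else DownloadMode.REUSE_CACHE_IF_EXISTS
verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS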
env-llmeval/lib/python3.10/site-packages/datasets/download/__init__.py ADDED
@@ -0,0 +1,10 @@
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
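Illustrative note (not part of the commit): these re-exports are the public entry points of the subpackage, so downstream code can import them directly.

from datasets.download import DownloadConfig, DownloadManager, DownloadMode, StreamingDownloadManager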
env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (431 Bytes)
 
env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc ADDED
Binary file (5.32 kB)
 
env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc ADDED
Binary file (19.7 kB)
 
env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc ADDED
Binary file (8.02 kB)
 
env-llmeval/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc ADDED
Binary file (37.7 kB)
 
env-llmeval/lib/python3.10/site-packages/datasets/download/download_config.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import warnings
3
+ from dataclasses import InitVar, dataclass, field
4
+ from pathlib import Path
5
+ from typing import Any, Dict, Optional, Union
6
+
7
+ from .. import config
8
+
9
+
10
+ @dataclass
11
+ class DownloadConfig:
12
+ """Configuration for our cached path manager.
13
+
14
+ Attributes:
15
+ cache_dir (`str` or `Path`, *optional*):
16
+ Specify a cache directory to save the file to (overwrite the
17
+ default cache dir).
18
+ force_download (`bool`, defaults to `False`):
19
+ If `True`, re-dowload the file even if it's already cached in
20
+ the cache dir.
21
+ resume_download (`bool`, defaults to `False`):
22
+ If `True`, resume the download if an incompletely received file is
23
+ found.
24
+ proxies (`dict`, *optional*):
25
+ user_agent (`str`, *optional*):
26
+ Optional string or dict that will be appended to the user-agent on remote
27
+ requests.
28
+ extract_compressed_file (`bool`, defaults to `False`):
29
+ If `True` and the path point to a zip or tar file,
30
+ extract the compressed file in a folder along the archive.
31
+ force_extract (`bool`, defaults to `False`):
32
+ If `True` when `extract_compressed_file` is `True` and the archive
33
+ was already extracted, re-extract the archive and override the folder where it was extracted.
34
+ delete_extracted (`bool`, defaults to `False`):
35
+ Whether to delete (or keep) the extracted files.
36
+ use_etag (`bool`, defaults to `True`):
37
+ Whether to use the ETag HTTP response header to validate the cached files.
38
+ num_proc (`int`, *optional*):
39
+ The number of processes to launch to download the files in parallel.
40
+ max_retries (`int`, default to `1`):
41
+ The number of times to retry an HTTP request if it fails.
42
+ token (`str` or `bool`, *optional*):
43
+ Optional string or boolean to use as Bearer token
44
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
45
+ use_auth_token (`str` or `bool`, *optional*):
46
+ Optional string or boolean to use as Bearer token
47
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
48
+
49
+ <Deprecated version="2.14.0">
50
+
51
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
52
+
53
+ </Deprecated>
54
+
55
+ ignore_url_params (`bool`, defaults to `False`):
56
+ Whether to strip all query parameters and fragments from
57
+ the download URL before using it for caching the file.
58
+ storage_options (`dict`, *optional*):
59
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
60
+ download_desc (`str`, *optional*):
61
+ A description to be displayed alongside the progress bar while downloading the files.
62
+ """
63
+
64
+ cache_dir: Optional[Union[str, Path]] = None
65
+ force_download: bool = False
66
+ resume_download: bool = False
67
+ local_files_only: bool = False
68
+ proxies: Optional[Dict] = None
69
+ user_agent: Optional[str] = None
70
+ extract_compressed_file: bool = False
71
+ force_extract: bool = False
72
+ delete_extracted: bool = False
73
+ use_etag: bool = True
74
+ num_proc: Optional[int] = None
75
+ max_retries: int = 1
76
+ token: Optional[Union[str, bool]] = None
77
+ use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
78
+ ignore_url_params: bool = False
79
+ storage_options: Dict[str, Any] = field(default_factory=dict)
80
+ download_desc: Optional[str] = None
81
+
82
+ def __post_init__(self, use_auth_token):
83
+ if use_auth_token != "deprecated":
84
+ warnings.warn(
85
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
86
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
87
+ FutureWarning,
88
+ )
89
+ self.token = use_auth_token
90
+ if "hf" not in self.storage_options:
91
+ self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
92
+
93
+ def copy(self) -> "DownloadConfig":
94
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
95
+
96
+ def __setattr__(self, name, value):
97
+ if name == "token" and getattr(self, "storage_options", None) is not None:
98
+ if "hf" not in self.storage_options:
99
+ self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
100
+ elif self.storage_options["hf"].get("token") is None:
101
+ self.storage_options["hf"]["token"] = value
102
+ super().__setattr__(name, value)
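A minimal usage sketch of the dataclass above, assuming the library's public `datasets.DownloadConfig` re-export; it shows how `__post_init__` seeds the `"hf"` entry of `storage_options` so downstream `fsspec` calls can authenticate against the Hub.

```py
from datasets import DownloadConfig

# A cache-friendly config: resume interrupted downloads, retry twice, download with 4 processes.
cfg = DownloadConfig(resume_download=True, max_retries=2, num_proc=4)

# __post_init__ injects an "hf" entry; the endpoint comes from config.HF_ENDPOINT
# (https://huggingface.co unless overridden by the environment), the token is None here.
print(cfg.storage_options["hf"])  # {'token': None, 'endpoint': 'https://huggingface.co'}

# copy() deep-copies every field, so mutating the copy leaves the original untouched.
cfg2 = cfg.copy()
cfg2.num_proc = None
assert cfg.num_proc == 4
```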
env-llmeval/lib/python3.10/site-packages/datasets/download/download_manager.py ADDED
@@ -0,0 +1,584 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Download manager interface."""
17
+
18
+ import enum
19
+ import io
20
+ import os
21
+ import posixpath
22
+ import tarfile
23
+ import warnings
24
+ import zipfile
25
+ from datetime import datetime
26
+ from functools import partial
27
+ from itertools import chain
28
+ from typing import Callable, Dict, Generator, List, Optional, Tuple, Union
29
+
30
+ from .. import config
31
+ from ..utils import tqdm as hf_tqdm
32
+ from ..utils.deprecation_utils import DeprecatedEnum, deprecated
33
+ from ..utils.file_utils import (
34
+ cached_path,
35
+ get_from_cache,
36
+ hash_url_to_filename,
37
+ is_relative_path,
38
+ stack_multiprocessing_download_progress_bars,
39
+ url_or_path_join,
40
+ )
41
+ from ..utils.info_utils import get_size_checksum_dict
42
+ from ..utils.logging import get_logger
43
+ from ..utils.py_utils import NestedDataStructure, map_nested, size_str
44
+ from ..utils.track import TrackedIterable, tracked_str
45
+ from .download_config import DownloadConfig
46
+
47
+
48
+ logger = get_logger(__name__)
49
+
50
+
51
+ BASE_KNOWN_EXTENSIONS = [
52
+ "txt",
53
+ "csv",
54
+ "json",
55
+ "jsonl",
56
+ "tsv",
57
+ "conll",
58
+ "conllu",
59
+ "orig",
60
+ "parquet",
61
+ "pkl",
62
+ "pickle",
63
+ "rel",
64
+ "xml",
65
+ ]
66
+ MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
67
+ bytes.fromhex("504B0304"): "zip",
68
+ bytes.fromhex("504B0506"): "zip", # empty archive
69
+ bytes.fromhex("504B0708"): "zip", # spanned archive
70
+ bytes.fromhex("425A68"): "bz2",
71
+ bytes.fromhex("1F8B"): "gzip",
72
+ bytes.fromhex("FD377A585A00"): "xz",
73
+ bytes.fromhex("04224D18"): "lz4",
74
+ bytes.fromhex("28B52FFD"): "zstd",
75
+ }
76
+ MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
77
+ b"Rar!": "rar",
78
+ }
79
+ MAGIC_NUMBER_MAX_LENGTH = max(
80
+ len(magic_number)
81
+ for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
82
+ )
83
+
84
+
85
+ class DownloadMode(enum.Enum):
86
+ """`Enum` for how to treat pre-existing downloads and data.
87
+
88
+ The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
89
+ raw downloads and the prepared dataset if they exist.
90
+
91
+ The generations modes:
92
+
93
+ | | Downloads | Dataset |
94
+ |-------------------------------------|-----------|---------|
95
+ | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
96
+ | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
97
+ | `FORCE_REDOWNLOAD` | Fresh | Fresh |
98
+
99
+ """
100
+
101
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
102
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
103
+ FORCE_REDOWNLOAD = "force_redownload"
104
+
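The mode table in the `DownloadMode` docstring is easiest to read next to a concrete call. A minimal sketch, assuming the usual `datasets.load_dataset` entry point; the dataset id is a hypothetical placeholder.

```py
from datasets import DownloadMode, load_dataset

# The enum round-trips through its string values, so both spellings below are equivalent.
assert DownloadMode("force_redownload") is DownloadMode.FORCE_REDOWNLOAD

# FORCE_REDOWNLOAD: fetch the raw files again *and* rebuild the prepared dataset.
ds = load_dataset("user/some_dataset", download_mode=DownloadMode.FORCE_REDOWNLOAD)

# REUSE_CACHE_IF_EXISTS: keep the raw downloads but regenerate the prepared dataset.
ds = load_dataset("user/some_dataset", download_mode="reuse_cache_if_exists")
```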
105
+
106
+ class GenerateMode(DeprecatedEnum):
107
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
108
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
109
+ FORCE_REDOWNLOAD = "force_redownload"
110
+
111
+ @property
112
+ def help_message(self):
113
+ return "Use 'DownloadMode' instead."
114
+
115
+
116
+ def _get_path_extension(path: str) -> str:
117
+ # Get extension: train.json.gz -> gz
118
+ extension = path.split(".")[-1]
119
+ # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
120
+ # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
121
+ for symb in "?-_":
122
+ extension = extension.split(symb)[0]
123
+ return extension
124
+
125
+
126
+ def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
127
+ """read the magic number from a file-like object and return the compression protocol"""
128
+ # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
129
+ try:
130
+ f.seek(0)
131
+ except (AttributeError, io.UnsupportedOperation):
132
+ return None
133
+ magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
134
+ f.seek(0)
135
+ for i in range(MAGIC_NUMBER_MAX_LENGTH):
136
+ compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
137
+ if compression is not None:
138
+ return compression
139
+ compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
140
+ if compression is not None:
141
+ raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
142
+
143
+
144
+ def _get_extraction_protocol(path: str) -> Optional[str]:
145
+ path = str(path)
146
+ extension = _get_path_extension(path)
147
+ # TODO(mariosasko): The below check will be useful once we can preserve the original extension in the new cache layout (use the `filename` parameter of `hf_hub_download`)
148
+ if (
149
+ extension in BASE_KNOWN_EXTENSIONS
150
+ or extension in ["tgz", "tar"]
151
+ or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
152
+ ):
153
+ return None
154
+ with open(path, "rb") as f:
155
+ return _get_extraction_protocol_with_magic_number(f)
156
+
157
+
158
+ class _IterableFromGenerator(TrackedIterable):
159
+ """Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
160
+
161
+ def __init__(self, generator: Callable, *args, **kwargs):
162
+ super().__init__()
163
+ self.generator = generator
164
+ self.args = args
165
+ self.kwargs = kwargs
166
+
167
+ def __iter__(self):
168
+ for x in self.generator(*self.args, **self.kwargs):
169
+ self.last_item = x
170
+ yield x
171
+ self.last_item = None
172
+
173
+
174
+ class ArchiveIterable(_IterableFromGenerator):
175
+ """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
176
+
177
+ @staticmethod
178
+ def _iter_tar(f):
179
+ stream = tarfile.open(fileobj=f, mode="r|*")
180
+ for tarinfo in stream:
181
+ file_path = tarinfo.name
182
+ if not tarinfo.isreg():
183
+ continue
184
+ if file_path is None:
185
+ continue
186
+ if os.path.basename(file_path).startswith((".", "__")):
187
+ # skipping hidden files
188
+ continue
189
+ file_obj = stream.extractfile(tarinfo)
190
+ yield file_path, file_obj
191
+ stream.members = []
192
+ del stream
193
+
194
+ @staticmethod
195
+ def _iter_zip(f):
196
+ zipf = zipfile.ZipFile(f)
197
+ for member in zipf.infolist():
198
+ file_path = member.filename
199
+ if member.is_dir():
200
+ continue
201
+ if file_path is None:
202
+ continue
203
+ if os.path.basename(file_path).startswith((".", "__")):
204
+ # skipping hidden files
205
+ continue
206
+ file_obj = zipf.open(member)
207
+ yield file_path, file_obj
208
+
209
+ @classmethod
210
+ def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
211
+ compression = _get_extraction_protocol_with_magic_number(f)
212
+ if compression == "zip":
213
+ yield from cls._iter_zip(f)
214
+ else:
215
+ yield from cls._iter_tar(f)
216
+
217
+ @classmethod
218
+ def _iter_from_path(cls, urlpath: str) -> Generator[Tuple, None, None]:
219
+ compression = _get_extraction_protocol(urlpath)
220
+ with open(urlpath, "rb") as f:
221
+ if compression == "zip":
222
+ yield from cls._iter_zip(f)
223
+ else:
224
+ yield from cls._iter_tar(f)
225
+
226
+ @classmethod
227
+ def from_buf(cls, fileobj) -> "ArchiveIterable":
228
+ return cls(cls._iter_from_fileobj, fileobj)
229
+
230
+ @classmethod
231
+ def from_path(cls, urlpath_or_buf) -> "ArchiveIterable":
232
+ return cls(cls._iter_from_path, urlpath_or_buf)
233
+
234
+
235
+ class FilesIterable(_IterableFromGenerator):
236
+ """An iterable of paths from a list of directories or files"""
237
+
238
+ @classmethod
239
+ def _iter_from_paths(cls, urlpaths: Union[str, List[str]]) -> Generator[str, None, None]:
240
+ if not isinstance(urlpaths, list):
241
+ urlpaths = [urlpaths]
242
+ for urlpath in urlpaths:
243
+ if os.path.isfile(urlpath):
244
+ yield urlpath
245
+ else:
246
+ for dirpath, dirnames, filenames in os.walk(urlpath):
247
+ # in-place modification to prune the search
248
+ dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
249
+ if os.path.basename(dirpath).startswith((".", "__")):
250
+ # skipping hidden directories
251
+ continue
252
+ for filename in sorted(filenames):
253
+ if filename.startswith((".", "__")):
254
+ # skipping hidden files
255
+ continue
256
+ yield os.path.join(dirpath, filename)
257
+
258
+ @classmethod
259
+ def from_paths(cls, urlpaths) -> "FilesIterable":
260
+ return cls(cls._iter_from_paths, urlpaths)
261
+
262
+
263
+ class DownloadManager:
264
+ is_streaming = False
265
+
266
+ def __init__(
267
+ self,
268
+ dataset_name: Optional[str] = None,
269
+ data_dir: Optional[str] = None,
270
+ download_config: Optional[DownloadConfig] = None,
271
+ base_path: Optional[str] = None,
272
+ record_checksums=True,
273
+ ):
274
+ """Download manager constructor.
275
+
276
+ Args:
277
+ data_dir:
278
+ can be used to specify a manual directory to get the files from.
279
+ dataset_name (`str`):
280
+ name of dataset this instance will be used for. If
281
+ provided, the downloaded files are recorded as belonging to this dataset.
282
+ download_config (`DownloadConfig`):
283
+ to specify the cache directory and other
284
+ download options
285
+ base_path (`str`):
286
+ base path that is used when relative paths are used to
287
+ download files. This can be a remote url.
288
+ record_checksums (`bool`, defaults to `True`):
289
+ Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
290
+ """
291
+ self._dataset_name = dataset_name
292
+ self._data_dir = data_dir
293
+ self._base_path = base_path or os.path.abspath(".")
294
+ # To record what is being used: {url: {num_bytes: int, checksum: str}}
295
+ self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
296
+ self.record_checksums = record_checksums
297
+ self.download_config = download_config or DownloadConfig()
298
+ self.downloaded_paths = {}
299
+ self.extracted_paths = {}
300
+
301
+ @property
302
+ def manual_dir(self):
303
+ return self._data_dir
304
+
305
+ @property
306
+ def downloaded_size(self):
307
+ """Returns the total size of downloaded files."""
308
+ return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
309
+
310
+ @staticmethod
311
+ def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
312
+ """Ship the files using Beam FileSystems to the pipeline temp dir.
313
+
314
+ Args:
315
+ downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
316
+ Nested structure containing the
317
+ downloaded path(s).
318
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
319
+ Apache Beam Pipeline.
320
+
321
+ Returns:
322
+ `str` or `list[str]` or `dict[str, str]`
323
+ """
324
+ from ..utils.beam_utils import upload_local_to_remote
325
+
326
+ remote_dir = pipeline._options.get_all_options().get("temp_location")
327
+ if remote_dir is None:
328
+ raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
329
+
330
+ def upload(local_file_path):
331
+ remote_file_path = posixpath.join(
332
+ remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
333
+ )
334
+ logger.info(
335
+ f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
336
+ )
337
+ upload_local_to_remote(local_file_path, remote_file_path)
338
+ return remote_file_path
339
+
340
+ uploaded_path_or_paths = map_nested(
341
+ lambda local_file_path: upload(local_file_path),
342
+ downloaded_path_or_paths,
343
+ )
344
+ return uploaded_path_or_paths
345
+
346
+ def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
347
+ """Record size/checksum of downloaded files."""
348
+ delay = 5
349
+ for url, path in hf_tqdm(
350
+ list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
351
+ delay=delay,
352
+ desc="Computing checksums",
353
+ ):
354
+ # call str to support PathLike objects
355
+ self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
356
+ path, record_checksum=self.record_checksums
357
+ )
358
+
359
+ @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
360
+ def download_custom(self, url_or_urls, custom_download):
361
+ """
362
+ Download given URL(s) by calling `custom_download`.
363
+
364
+ Args:
365
+ url_or_urls (`str` or `list` or `dict`):
366
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
367
+ custom_download (`Callable[src_url, dst_path]`):
368
+ A callable that takes the source URL and destination path. For example
369
+ `tf.io.gfile.copy`, which lets you download from Google Storage.
370
+
371
+ Returns:
372
+ downloaded_path(s): `str`, The downloaded paths matching the given input
373
+ `url_or_urls`.
374
+
375
+ Example:
376
+
377
+ ```py
378
+ >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
379
+ ```
380
+ """
381
+ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
382
+ max_retries = self.download_config.max_retries
383
+
384
+ def url_to_downloaded_path(url):
385
+ return os.path.join(cache_dir, hash_url_to_filename(url))
386
+
387
+ downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
388
+ url_or_urls = NestedDataStructure(url_or_urls)
389
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
390
+ for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
391
+ try:
392
+ get_from_cache(
393
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
394
+ )
395
+ cached = True
396
+ except FileNotFoundError:
397
+ cached = False
398
+ if not cached or self.download_config.force_download:
399
+ custom_download(url, path)
400
+ get_from_cache(
401
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
402
+ )
403
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
404
+ return downloaded_path_or_paths.data
405
+
406
+ def download(self, url_or_urls):
407
+ """Download given URL(s).
408
+
409
+ By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
410
+
411
+ Args:
412
+ url_or_urls (`str` or `list` or `dict`):
413
+ URL or `list` or `dict` of URLs to download. Each URL is a `str`.
414
+
415
+ Returns:
416
+ `str` or `list` or `dict`:
417
+ The downloaded paths matching the given input `url_or_urls`.
418
+
419
+ Example:
420
+
421
+ ```py
422
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
423
+ ```
424
+ """
425
+ download_config = self.download_config.copy()
426
+ download_config.extract_compressed_file = False
427
+ if download_config.download_desc is None:
428
+ download_config.download_desc = "Downloading data"
429
+
430
+ download_func = partial(self._download, download_config=download_config)
431
+
432
+ start_time = datetime.now()
433
+ with stack_multiprocessing_download_progress_bars():
434
+ downloaded_path_or_paths = map_nested(
435
+ download_func,
436
+ url_or_urls,
437
+ map_tuple=True,
438
+ num_proc=download_config.num_proc,
439
+ desc="Downloading data files",
440
+ )
441
+ duration = datetime.now() - start_time
442
+ logger.info(f"Downloading took {duration.total_seconds() // 60} min")
443
+ url_or_urls = NestedDataStructure(url_or_urls)
444
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
445
+ self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
446
+
447
+ start_time = datetime.now()
448
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
449
+ duration = datetime.now() - start_time
450
+ logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
451
+
452
+ return downloaded_path_or_paths.data
453
+
454
+ def _download(self, url_or_filename: str, download_config: DownloadConfig) -> str:
455
+ url_or_filename = str(url_or_filename)
456
+ if is_relative_path(url_or_filename):
457
+ # append the relative path to the base_path
458
+ url_or_filename = url_or_path_join(self._base_path, url_or_filename)
459
+ out = cached_path(url_or_filename, download_config=download_config)
460
+ out = tracked_str(out)
461
+ out.set_origin(url_or_filename)
462
+ return out
463
+
464
+ def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
465
+ """Iterate over files within an archive.
466
+
467
+ Args:
468
+ path_or_buf (`str` or `io.BufferedReader`):
469
+ Archive path or archive binary file object.
470
+
471
+ Yields:
472
+ `tuple[str, io.BufferedReader]`:
473
+ 2-tuple (path_within_archive, file_object).
474
+ File object is opened in binary mode.
475
+
476
+ Example:
477
+
478
+ ```py
479
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
480
+ >>> files = dl_manager.iter_archive(archive)
481
+ ```
482
+ """
483
+
484
+ if hasattr(path_or_buf, "read"):
485
+ return ArchiveIterable.from_buf(path_or_buf)
486
+ else:
487
+ return ArchiveIterable.from_path(path_or_buf)
488
+
489
+ def iter_files(self, paths: Union[str, List[str]]):
490
+ """Iterate over file paths.
491
+
492
+ Args:
493
+ paths (`str` or `list` of `str`):
494
+ Root paths.
495
+
496
+ Yields:
497
+ `str`: File path.
498
+
499
+ Example:
500
+
501
+ ```py
502
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
503
+ >>> files = dl_manager.iter_files(files)
504
+ ```
505
+ """
506
+ return FilesIterable.from_paths(paths)
507
+
508
+ def extract(self, path_or_paths, num_proc="deprecated"):
509
+ """Extract given path(s).
510
+
511
+ Args:
512
+ path_or_paths (path or `list` or `dict`):
513
+ Path of file to extract. Each path is a `str`.
514
+ num_proc (`int`):
515
+ Use multi-processing if `num_proc` > 1 and the length of
516
+ `path_or_paths` is larger than `num_proc`.
517
+
518
+ <Deprecated version="2.6.2">
519
+
520
+ Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.
521
+
522
+ </Deprecated>
523
+
524
+ Returns:
525
+ extracted_path(s): `str`, The extracted paths matching the given input
526
+ path_or_paths.
527
+
528
+ Example:
529
+
530
+ ```py
531
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
532
+ >>> extracted_files = dl_manager.extract(downloaded_files)
533
+ ```
534
+ """
535
+ if num_proc != "deprecated":
536
+ warnings.warn(
537
+ "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.",
538
+ FutureWarning,
539
+ )
540
+ download_config = self.download_config.copy()
541
+ download_config.extract_compressed_file = True
542
+ extract_func = partial(self._download, download_config=download_config)
543
+ extracted_paths = map_nested(
544
+ extract_func,
545
+ path_or_paths,
546
+ num_proc=download_config.num_proc,
547
+ desc="Extracting data files",
548
+ )
549
+ path_or_paths = NestedDataStructure(path_or_paths)
550
+ extracted_paths = NestedDataStructure(extracted_paths)
551
+ self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
552
+ return extracted_paths.data
553
+
554
+ def download_and_extract(self, url_or_urls):
555
+ """Download and extract given `url_or_urls`.
556
+
557
+ Is roughly equivalent to:
558
+
559
+ ```
560
+ extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
561
+ ```
562
+
563
+ Args:
564
+ url_or_urls (`str` or `list` or `dict`):
565
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
566
+
567
+ Returns:
568
+ extracted_path(s): `str`, extracted paths of given URL(s).
569
+ """
570
+ return self.extract(self.download(url_or_urls))
571
+
572
+ def get_recorded_sizes_checksums(self):
573
+ return self._recorded_sizes_checksums.copy()
574
+
575
+ def delete_extracted_files(self):
576
+ paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
577
+ for key, path in list(self.extracted_paths.items()):
578
+ if path in paths_to_delete and os.path.isfile(path):
579
+ os.remove(path)
580
+ del self.extracted_paths[key]
581
+
582
+ def manage_extracted_files(self):
583
+ if self.download_config.delete_extracted:
584
+ self.delete_extracted_files()
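Before the mock manager, a short usage sketch of `DownloadManager`, assuming the `datasets.download` re-exports; inside a dataset script the builder constructs the manager and hands it to `_split_generators`, and the URLs below are hypothetical placeholders.

```py
from datasets.download import DownloadConfig, DownloadManager

dl_manager = DownloadManager(dataset_name="demo", download_config=DownloadConfig(num_proc=2))

# Nested structures (dict/list) are preserved, thanks to map_nested.
paths = dl_manager.download_and_extract({
    "train": "https://example.com/train.tar.gz",   # hypothetical URL
    "test": "https://example.com/test.tar.gz",     # hypothetical URL
})
print(paths["train"])  # local path of the extracted archive in the datasets cache

# Or stream (path, file object) pairs out of an archive without extracting it to disk,
# as in the iter_archive docstring above.
archive = dl_manager.download("https://example.com/images.tar.gz")  # hypothetical URL
for inner_path, fobj in dl_manager.iter_archive(archive):
    header = fobj.read(16)
```

When `DownloadConfig(delete_extracted=True)` is used, `manage_extracted_files()` can then reclaim disk space via `delete_extracted_files()`.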
env-llmeval/lib/python3.10/site-packages/datasets/download/mock_download_manager.py ADDED
@@ -0,0 +1,244 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Mock download manager interface."""
17
+
18
+ import os
19
+ import re
20
+ import urllib.parse
21
+ from pathlib import Path
22
+ from typing import Callable, List, Optional, Union
23
+ from zipfile import ZipFile
24
+
25
+ from ..utils.file_utils import cached_path, hf_github_url
26
+ from ..utils.logging import get_logger
27
+ from ..utils.version import Version
28
+
29
+
30
+ logger = get_logger(__name__)
31
+
32
+
33
+ class MockDownloadManager:
34
+ dummy_file_name = "dummy_data"
35
+ datasets_scripts_dir = "datasets"
36
+ is_streaming = False
37
+
38
+ def __init__(
39
+ self,
40
+ dataset_name: str,
41
+ config: str,
42
+ version: Union[Version, str],
43
+ cache_dir: Optional[str] = None,
44
+ use_local_dummy_data: bool = False,
45
+ load_existing_dummy_data: bool = True,
46
+ download_callbacks: Optional[List[Callable]] = None,
47
+ ):
48
+ self.downloaded_size = 0
49
+ self.dataset_name = dataset_name
50
+ self.cache_dir = cache_dir
51
+ self.use_local_dummy_data = use_local_dummy_data
52
+ self.config = config
53
+ # download_callbacks take a single url as input
54
+ self.download_callbacks: List[Callable] = download_callbacks or []
55
+ # if False, it doesn't load existing files and it returns the paths of the dummy files relative
56
+ # to the dummy_data zip file root
57
+ self.load_existing_dummy_data = load_existing_dummy_data
58
+
59
+ # TODO(PVP, QL) might need to make this more general
60
+ self.version_name = str(version)
61
+ # to be downloaded
62
+ self._dummy_file = None
63
+ self._bucket_url = None
64
+
65
+ @property
66
+ def dummy_file(self):
67
+ if self._dummy_file is None:
68
+ self._dummy_file = self.download_dummy_data()
69
+ return self._dummy_file
70
+
71
+ @property
72
+ def dummy_data_folder(self):
73
+ if self.config is not None:
74
+ # structure is dummy / config_name / version_name
75
+ return os.path.join("dummy", self.config.name, self.version_name)
76
+ # structure is dummy / version_name
77
+ return os.path.join("dummy", self.version_name)
78
+
79
+ @property
80
+ def dummy_zip_file(self):
81
+ return os.path.join(self.dummy_data_folder, "dummy_data.zip")
82
+
83
+ def download_dummy_data(self):
84
+ path_to_dummy_data_dir = (
85
+ self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
86
+ )
87
+
88
+ local_path = cached_path(
89
+ path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
90
+ )
91
+
92
+ return os.path.join(local_path, self.dummy_file_name)
93
+
94
+ @property
95
+ def local_path_to_dummy_data(self):
96
+ return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
97
+
98
+ @property
99
+ def github_path_to_dummy_data(self):
100
+ if self._bucket_url is None:
101
+ self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
102
+ return self._bucket_url
103
+
104
+ @property
105
+ def manual_dir(self):
106
+ # return full path if it's a dir
107
+ if os.path.isdir(self.dummy_file):
108
+ return self.dummy_file
109
+ # else cut off path to file -> example `xsum`.
110
+ return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
111
+
112
+ # this function has to be in the manager under this name so that testing works
113
+ def download_and_extract(self, data_url, *args):
114
+ if self.load_existing_dummy_data:
115
+ # dummy data is downloaded and tested
116
+ dummy_file = self.dummy_file
117
+ else:
118
+ # dummy data cannot be downloaded and only the path to dummy file is returned
119
+ dummy_file = self.dummy_file_name
120
+
121
+ # special case when data_url is a dict
122
+ if isinstance(data_url, dict):
123
+ return self.create_dummy_data_dict(dummy_file, data_url)
124
+ elif isinstance(data_url, (list, tuple)):
125
+ return self.create_dummy_data_list(dummy_file, data_url)
126
+ else:
127
+ return self.create_dummy_data_single(dummy_file, data_url)
128
+
129
+ # this function has to be in the manager under this name so that testing works
130
+ def download(self, data_url, *args):
131
+ return self.download_and_extract(data_url)
132
+
133
+ # this function has to be in the manager under this name so that testing works
134
+ def download_custom(self, data_url, custom_download):
135
+ return self.download_and_extract(data_url)
136
+
137
+ # this function has to be in the manager under this name so that testing works
138
+ def extract(self, path, *args, **kwargs):
139
+ return path
140
+
141
+ # this function has to be in the manager under this name so that testing works
142
+ def get_recorded_sizes_checksums(self):
143
+ return {}
144
+
145
+ def create_dummy_data_dict(self, path_to_dummy_data, data_url):
146
+ dummy_data_dict = {}
147
+ for key, single_urls in data_url.items():
148
+ for download_callback in self.download_callbacks:
149
+ if isinstance(single_urls, list):
150
+ for single_url in single_urls:
151
+ download_callback(single_url)
152
+ else:
153
+ single_url = single_urls
154
+ download_callback(single_url)
155
+ # we force the name of each key to be the last file / folder name of the url path
156
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
157
+ if isinstance(single_urls, list):
158
+ value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
159
+ else:
160
+ single_url = single_urls
161
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
162
+ dummy_data_dict[key] = value
163
+
164
+ # make sure that values are unique
165
+ if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
166
+ dummy_data_dict.values()
167
+ ):
168
+ # append key to value to make its name unique
169
+ dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
170
+
171
+ return dummy_data_dict
172
+
173
+ def create_dummy_data_list(self, path_to_dummy_data, data_url):
174
+ dummy_data_list = []
175
+ # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
176
+ is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
177
+ is_pubmed_records = all(
178
+ url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
179
+ )
180
+ if data_url and (is_tf_records or is_pubmed_records):
181
+ data_url = [data_url[0]] * len(data_url)
182
+ for single_url in data_url:
183
+ for download_callback in self.download_callbacks:
184
+ download_callback(single_url)
185
+ # we force the name of each key to be the last file / folder name of the url path
186
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
187
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
188
+ dummy_data_list.append(value)
189
+ return dummy_data_list
190
+
191
+ def create_dummy_data_single(self, path_to_dummy_data, data_url):
192
+ for download_callback in self.download_callbacks:
193
+ download_callback(data_url)
194
+ # we force the name of each key to be the last file / folder name of the url path
195
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
196
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
197
+ if os.path.exists(value) or not self.load_existing_dummy_data:
198
+ return value
199
+ else:
200
+ # Backward compatibility, maybe deprecate at one point.
201
+ # For many datasets with single url calls to dl_manager.download_and_extract,
202
+ # the dummy_data.zip file is actually the zipped downloaded file
203
+ # while now we expect the dummy_data.zip file to be a directory containing
204
+ # the downloaded file.
205
+ return path_to_dummy_data
206
+
207
+ def delete_extracted_files(self):
208
+ pass
209
+
210
+ def manage_extracted_files(self):
211
+ pass
212
+
213
+ def iter_archive(self, path):
214
+ def _iter_archive_members(path):
215
+ # this preserves the order of the members inside the ZIP archive
216
+ dummy_parent_path = Path(self.dummy_file).parent
217
+ relative_path = path.relative_to(dummy_parent_path)
218
+ with ZipFile(self.local_path_to_dummy_data) as zip_file:
219
+ members = zip_file.namelist()
220
+ for member in members:
221
+ if member.startswith(relative_path.as_posix()):
222
+ yield dummy_parent_path.joinpath(member)
223
+
224
+ path = Path(path)
225
+ file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
226
+ for file_path in file_paths:
227
+ if file_path.is_file() and not file_path.name.startswith((".", "__")):
228
+ yield file_path.relative_to(path).as_posix(), file_path.open("rb")
229
+
230
+ def iter_files(self, paths):
231
+ if not isinstance(paths, list):
232
+ paths = [paths]
233
+ for path in paths:
234
+ if os.path.isfile(path):
235
+ yield path
236
+ else:
237
+ for dirpath, dirnames, filenames in os.walk(path):
238
+ if os.path.basename(dirpath).startswith((".", "__")):
239
+ continue
240
+ dirnames.sort()
241
+ for filename in sorted(filenames):
242
+ if filename.startswith((".", "__")):
243
+ continue
244
+ yield os.path.join(dirpath, filename)
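To make the mock manager's URL-to-dummy-path mapping concrete, here is a small sketch; the dataset name and URL are hypothetical. With `load_existing_dummy_data=False` nothing is downloaded or unzipped; URLs are simply rewritten under the `dummy_data` folder, with query strings quoted.

```py
from datasets.download.mock_download_manager import MockDownloadManager

mock_dl = MockDownloadManager(
    dataset_name="my_dataset",         # hypothetical
    config=None,
    version="1.0.0",
    use_local_dummy_data=True,
    load_existing_dummy_data=False,    # only map URLs to dummy paths
)

out = mock_dl.download_and_extract({"train": "https://example.com/data/train.csv?dl=1"})
print(out)  # on POSIX: {'train': 'dummy_data/train.csv%3Fdl%3D1'}
```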
env-llmeval/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py ADDED
@@ -0,0 +1,1133 @@
1
+ import glob
2
+ import io
3
+ import os
4
+ import posixpath
5
+ import re
6
+ import tarfile
7
+ import time
8
+ import xml.dom.minidom
9
+ import zipfile
10
+ from asyncio import TimeoutError
11
+ from io import BytesIO
12
+ from itertools import chain
13
+ from pathlib import Path, PurePosixPath
14
+ from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union
15
+ from xml.etree import ElementTree as ET
16
+
17
+ import fsspec
18
+ from aiohttp.client_exceptions import ClientError
19
+ from huggingface_hub.utils import EntryNotFoundError
20
+ from packaging import version
21
+
22
+ from .. import config
23
+ from ..filesystems import COMPRESSION_FILESYSTEMS
24
+ from ..utils.file_utils import (
25
+ get_authentication_headers_for_url,
26
+ get_datasets_user_agent,
27
+ http_head,
28
+ is_local_path,
29
+ is_relative_path,
30
+ url_or_path_join,
31
+ )
32
+ from ..utils.logging import get_logger
33
+ from ..utils.py_utils import map_nested
34
+ from .download_config import DownloadConfig
35
+
36
+
37
+ logger = get_logger(__name__)
38
+
39
+ BASE_KNOWN_EXTENSIONS = [
40
+ "txt",
41
+ "csv",
42
+ "json",
43
+ "jsonl",
44
+ "tsv",
45
+ "conll",
46
+ "conllu",
47
+ "orig",
48
+ "parquet",
49
+ "pkl",
50
+ "pickle",
51
+ "rel",
52
+ "xml",
53
+ ]
54
+ COMPRESSION_EXTENSION_TO_PROTOCOL = {
55
+ # single file compression
56
+ **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
57
+ # archive compression
58
+ "zip": "zip",
59
+ }
60
+ SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
61
+ SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
62
+
63
+
64
+ MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
65
+ bytes.fromhex("504B0304"): "zip",
66
+ bytes.fromhex("504B0506"): "zip", # empty archive
67
+ bytes.fromhex("504B0708"): "zip", # spanned archive
68
+ bytes.fromhex("425A68"): "bz2",
69
+ bytes.fromhex("1F8B"): "gzip",
70
+ bytes.fromhex("FD377A585A00"): "xz",
71
+ bytes.fromhex("04224D18"): "lz4",
72
+ bytes.fromhex("28B52FFD"): "zstd",
73
+ }
74
+ MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
75
+ b"Rar!": "rar",
76
+ }
77
+ MAGIC_NUMBER_MAX_LENGTH = max(
78
+ len(magic_number)
79
+ for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
80
+ )
81
+
82
+
83
+ class NonStreamableDatasetError(Exception):
84
+ pass
85
+
86
+
87
+ def xjoin(a, *p):
88
+ """
89
+ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
90
+
91
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
92
+ This is used to access files inside a zip file over http for example.
93
+
94
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
95
+ Then you can just chain the url this way:
96
+
97
+ zip://folder1/file.txt::https://host.com/archive.zip
98
+
99
+ The xjoin function allows you to apply the join on the first path of the chain.
100
+
101
+ Example::
102
+
103
+ >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
104
+ zip://folder1/file.txt::https://host.com/archive.zip
105
+ """
106
+ a, *b = str(a).split("::")
107
+ if is_local_path(a):
108
+ return os.path.join(a, *p)
109
+ else:
110
+ a = posixpath.join(a, *p)
111
+ return "::".join([a] + b)
112
+
113
+
114
+ def xdirname(a):
115
+ """
116
+ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
117
+
118
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
119
+ This is used to access files inside a zip file over http for example.
120
+
121
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
122
+ Then you can just chain the url this way:
123
+
124
+ zip://folder1/file.txt::https://host.com/archive.zip
125
+
126
+ The xdirname function allows you to apply the dirname on the first path of the chain.
127
+
128
+ Example::
129
+
130
+ >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
131
+ zip://folder1::https://host.com/archive.zip
132
+ """
133
+ a, *b = str(a).split("::")
134
+ if is_local_path(a):
135
+ a = os.path.dirname(Path(a).as_posix())
136
+ else:
137
+ a = posixpath.dirname(a)
138
+ # if we end up at the root of the protocol, we get for example a = 'http:'
139
+ # so we have to fix it by adding the '//' that was removed:
140
+ if a.endswith(":"):
141
+ a += "//"
142
+ return "::".join([a] + b)
143
+
144
+
145
+ def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None):
146
+ """Extend `os.path.exists` function to support both local and remote files.
147
+
148
+ Args:
149
+ urlpath (`str`): URL path.
150
+ download_config : mainly use token or storage_options to support different platforms and auth types.
151
+
152
+ Returns:
153
+ `bool`
154
+ """
155
+
156
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
157
+ if is_local_path(main_hop):
158
+ return os.path.exists(main_hop)
159
+ else:
160
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
161
+ main_hop, *rest_hops = urlpath.split("::")
162
+ fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
163
+ return fs.exists(main_hop)
164
+
165
+
166
+ def xbasename(a):
167
+ """
168
+ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
169
+
170
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
171
+ This is used to access files inside a zip file over http for example.
172
+
173
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
174
+ Then you can just chain the url this way:
175
+
176
+ zip://folder1/file.txt::https://host.com/archive.zip
177
+
178
+ The xbasename function allows you to apply the basename on the first path of the chain.
179
+
180
+ Example::
181
+
182
+ >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip")
183
+ file.txt
184
+ """
185
+ a, *b = str(a).split("::")
186
+ if is_local_path(a):
187
+ return os.path.basename(Path(a).as_posix())
188
+ else:
189
+ return posixpath.basename(a)
190
+
191
+
192
+ def xsplit(a):
193
+ """
194
+ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls.
195
+
196
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
197
+ This is used to access files inside a zip file over http for example.
198
+
199
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
200
+ Then you can just chain the url this way:
201
+
202
+ zip://folder1/file.txt::https://host.com/archive.zip
203
+
204
+ The xsplit function allows you to apply the split on the first path of the chain.
205
+
206
+ Example::
207
+
208
+ >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip")
209
+ ('zip://folder1::https://host.com/archive.zip', 'file.txt')
210
+ """
211
+ a, *b = str(a).split("::")
212
+ if is_local_path(a):
213
+ return os.path.split(Path(a).as_posix())
214
+ else:
215
+ a, tail = posixpath.split(a)
216
+ return "::".join([a + "//" if a.endswith(":") else a] + b), tail
217
+
218
+
219
+ def xsplitext(a):
220
+ """
221
+ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls.
222
+
223
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
224
+ This is used to access files inside a zip file over http for example.
225
+
226
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
227
+ Then you can just chain the url this way:
228
+
229
+ zip://folder1/file.txt::https://host.com/archive.zip
230
+
231
+ The xsplitext function allows you to apply the splitext on the first path of the chain.
232
+
233
+ Example::
234
+
235
+ >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip")
236
+ ('zip://folder1/file::https://host.com/archive.zip', '.txt')
237
+ """
238
+ a, *b = str(a).split("::")
239
+ if is_local_path(a):
240
+ return os.path.splitext(Path(a).as_posix())
241
+ else:
242
+ a, ext = posixpath.splitext(a)
243
+ return "::".join([a] + b), ext
244
+
245
+
246
+ def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool:
247
+ """Extend `os.path.isfile` function to support remote files.
248
+
249
+ Args:
250
+ path (`str`): URL path.
251
+ download_config : mainly use token or storage_options to support different platforms and auth types.
252
+
253
+ Returns:
254
+ `bool`
255
+ """
256
+ main_hop, *rest_hops = str(path).split("::")
257
+ if is_local_path(main_hop):
258
+ return os.path.isfile(path)
259
+ else:
260
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
261
+ main_hop, *rest_hops = path.split("::")
262
+ fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
263
+ return fs.isfile(main_hop)
264
+
265
+
266
+ def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int:
267
+ """Extend `os.path.getsize` function to support remote files.
268
+
269
+ Args:
270
+ path (`str`): URL path.
271
+ download_config : mainly use token or storage_options to support different platforms and auth types.
272
+
273
+ Returns:
274
+ `int`: The file size in bytes.
275
+ """
276
+ main_hop, *rest_hops = str(path).split("::")
277
+ if is_local_path(main_hop):
278
+ return os.path.getsize(path)
279
+ else:
280
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
281
+ main_hop, *rest_hops = path.split("::")
282
+ fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
283
+ try:
284
+ size = fs.size(main_hop)
285
+ except EntryNotFoundError:
286
+ raise FileNotFoundError(f"No such file: {path}")
287
+ if size is None:
288
+ # use xopen instead of fs.open to make data fetching more robust
289
+ with xopen(path, download_config=download_config) as f:
290
+ size = len(f.read())
291
+ return size
292
+
293
+
294
+ def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool:
295
+ """Extend `os.path.isdir` function to support remote files.
296
+
297
+ Args:
298
+ path (`str`): URL path.
299
+ download_config : mainly use token or storage_options to support different platforms and auth types.
300
+
301
+ Returns:
302
+ `bool`
303
+ """
304
+ main_hop, *rest_hops = str(path).split("::")
305
+ if is_local_path(main_hop):
306
+ return os.path.isdir(path)
307
+ else:
308
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
309
+ main_hop, *rest_hops = path.split("::")
310
+ fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
311
+ inner_path = main_hop.split("://")[-1]
312
+ if not inner_path.strip("/"):
313
+ return True
314
+ return fs.isdir(inner_path)
315
+
316
+
317
+ def xrelpath(path, start=None):
318
+ """Extend `os.path.relpath` function to support remote files.
319
+
320
+ Args:
321
+ path (`str`): URL path.
322
+ start (`str`): Start URL directory path.
323
+
324
+ Returns:
325
+ `str`
326
+ """
327
+ main_hop, *rest_hops = str(path).split("::")
328
+ if is_local_path(main_hop):
329
+ return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop)
330
+ else:
331
+ return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop)
332
+
333
+
334
+ def _add_retries_to_file_obj_read_method(file_obj):
335
+ read = file_obj.read
336
+ max_retries = config.STREAMING_READ_MAX_RETRIES
337
+
338
+ def read_with_retries(*args, **kwargs):
339
+ disconnect_err = None
340
+ for retry in range(1, max_retries + 1):
341
+ try:
342
+ out = read(*args, **kwargs)
343
+ break
344
+ except (ClientError, TimeoutError) as err:
345
+ disconnect_err = err
346
+ logger.warning(
347
+ f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
348
+ )
349
+ time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
350
+ else:
351
+ raise ConnectionError("Server Disconnected") from disconnect_err
352
+ return out
353
+
354
+ file_obj.read = read_with_retries
355
+
356
+
357
+ def _get_path_extension(path: str) -> str:
358
+ # Get extension: https://foo.bar/train.json.gz -> gz
359
+ extension = path.split(".")[-1]
360
+ # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
361
+ # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
362
+ for symb in "?-_":
363
+ extension = extension.split(symb)[0]
364
+ return extension
365
+
366
+
367
+ def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
368
+ """read the magic number from a file-like object and return the compression protocol"""
369
+ # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
370
+ try:
371
+ f.seek(0)
372
+ except (AttributeError, io.UnsupportedOperation):
373
+ return None
374
+ magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
375
+ f.seek(0)
376
+ for i in range(MAGIC_NUMBER_MAX_LENGTH):
377
+ compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
378
+ if compression is not None:
379
+ return compression
380
+ compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
381
+ if compression is not None:
382
+ raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
383
+
384
+
385
+ def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]:
386
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
387
+ urlpath = str(urlpath)
388
+ path = urlpath.split("::")[0]
389
+ extension = _get_path_extension(path)
390
+ if (
391
+ extension in BASE_KNOWN_EXTENSIONS
392
+ or extension in ["tgz", "tar"]
393
+ or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
394
+ ):
395
+ return None
396
+ elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL:
397
+ return COMPRESSION_EXTENSION_TO_PROTOCOL[extension]
398
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
399
+ try:
400
+ with fsspec.open(urlpath, **(storage_options or {})) as f:
401
+ return _get_extraction_protocol_with_magic_number(f)
402
+ except FileNotFoundError:
403
+ if urlpath.startswith(config.HF_ENDPOINT):
404
+ raise FileNotFoundError(
405
+ urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
406
+ ) from None
407
+ else:
408
+ raise
409
+
410
+
411
+ def _prepare_path_and_storage_options(
412
+ urlpath: str, download_config: Optional[DownloadConfig] = None
413
+ ) -> Tuple[str, Dict[str, Dict[str, Any]]]:
414
+ prepared_urlpath = []
415
+ prepared_storage_options = {}
416
+ for hop in urlpath.split("::"):
417
+ hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config)
418
+ prepared_urlpath.append(hop)
419
+ prepared_storage_options.update(storage_options)
420
+ return "::".join(prepared_urlpath), storage_options
421
+
422
+
423
+ def _prepare_single_hop_path_and_storage_options(
424
+ urlpath: str, download_config: Optional[DownloadConfig] = None
425
+ ) -> Tuple[str, Dict[str, Dict[str, Any]]]:
426
+ """
427
+ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head
428
+
429
+ In particular it resolves google drive URLs
430
+ It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths.
431
+
432
+ Storage options are formatted in the form {protocol: storage_options_for_protocol}
433
+ """
434
+ token = None if download_config is None else download_config.token
435
+ if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath:
436
+ urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
437
+ protocol = urlpath.split("://")[0] if "://" in urlpath else "file"
438
+ if download_config is not None and protocol in download_config.storage_options:
439
+ storage_options = download_config.storage_options[protocol]
440
+ elif download_config is not None and protocol not in download_config.storage_options:
441
+ storage_options = {
442
+ option_name: option_value
443
+ for option_name, option_value in download_config.storage_options.items()
444
+ if option_name not in fsspec.available_protocols()
445
+ }
446
+ else:
447
+ storage_options = {}
448
+ if storage_options:
449
+ storage_options = {protocol: storage_options}
450
+ if protocol in ["http", "https"]:
451
+ storage_options[protocol] = {
452
+ "headers": {
453
+ **get_authentication_headers_for_url(urlpath, token=token),
454
+ "user-agent": get_datasets_user_agent(),
455
+ },
456
+ "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables.
457
+ **(storage_options.get(protocol, {})),
458
+ }
459
+ if "drive.google.com" in urlpath:
460
+ response = http_head(urlpath)
461
+ cookies = None
462
+ for k, v in response.cookies.items():
463
+ if k.startswith("download_warning"):
464
+ urlpath += "&confirm=" + v
465
+ cookies = response.cookies
466
+ storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})}
467
+ # Fix Google Drive URL to avoid Virus scan warning
468
+ if "drive.google.com" in urlpath and "confirm=" not in urlpath:
469
+ urlpath += "&confirm=t"
470
+ if urlpath.startswith("https://raw.githubusercontent.com/"):
471
+ # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389
472
+ storage_options[protocol]["headers"]["Accept-Encoding"] = "identity"
473
+ elif protocol == "hf":
474
+ storage_options[protocol] = {
475
+ "token": token,
476
+ "endpoint": config.HF_ENDPOINT,
477
+ **storage_options.get(protocol, {}),
478
+ }
479
+ # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967)
480
+ if config.HF_HUB_VERSION < version.parse("0.21.0"):
481
+ storage_options[protocol]["block_size"] = "default"
482
+ return urlpath, storage_options
483
+
484
+
485
+ def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs):
486
+ """Extend `open` function to support remote files using `fsspec`.
487
+
488
+ It also has a retry mechanism in case connection fails.
489
+ The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co
490
+
491
+ Args:
492
+ file (`str`): Path name of the file to be opened.
493
+ mode (`str`, *optional*, default "r"): Mode in which the file is opened.
494
+ *args: Arguments to be passed to `fsspec.open`.
495
+ download_config : mainly use token or storage_options to support different platforms and auth types.
496
+ **kwargs: Keyword arguments to be passed to `fsspec.open`.
497
+
498
+ Returns:
499
+ file object
500
+ """
501
+ # This works as well for `xopen(str(Path(...)))`
502
+ file_str = _as_str(file)
503
+ main_hop, *rest_hops = file_str.split("::")
504
+ if is_local_path(main_hop):
505
+ # ignore fsspec-specific kwargs
506
+ kwargs.pop("block_size", None)
507
+ return open(main_hop, mode, *args, **kwargs)
508
+ # add headers and cookies for authentication on the HF Hub and for Google Drive
509
+ file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config)
510
+ kwargs = {**kwargs, **(storage_options or {})}
511
+ try:
512
+ file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
513
+ except ValueError as e:
514
+ if str(e) == "Cannot seek streaming HTTP file":
515
+ raise NonStreamableDatasetError(
516
+ "Streaming is not possible for this dataset because data host server doesn't support HTTP range "
517
+ "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)"
518
+ ) from e
519
+ else:
520
+ raise
521
+ except FileNotFoundError:
522
+ if file.startswith(config.HF_ENDPOINT):
523
+ raise FileNotFoundError(
524
+ file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
525
+ ) from None
526
+ else:
527
+ raise
528
+ _add_retries_to_file_obj_read_method(file_obj)
529
+ return file_obj
530
+
531
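A brief usage sketch of `xopen` (import path assumed from this file, URLs hypothetical): local paths fall back to the built-in `open`, remote paths go through `fsspec`, and the `::` separator chains an archive member onto the archive URL.

```py
from datasets import DownloadConfig
from datasets.download.streaming_download_manager import xopen

# Stream a remote text file (hypothetical URL).
with xopen("https://example.com/corpus/train.txt", "r", download_config=DownloadConfig()) as f:
    first_line = f.readline()

# Chained URL: the part after "::" is the outer archive, the part before it is the member inside.
chained = "zip://train.csv::https://example.com/corpus/archive.zip"
with xopen(chained, "rb", download_config=DownloadConfig()) as f:
    header_bytes = f.read(1024)
```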
+
532
+ def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]:
533
+ """Extend `os.listdir` function to support remote files.
534
+
535
+ Args:
536
+ path (`str`): URL path.
537
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
538
+
539
+ Returns:
540
+ `list` of `str`
541
+ """
542
+ main_hop, *rest_hops = _as_str(path).split("::")
543
+ if is_local_path(main_hop):
544
+ return os.listdir(path)
545
+ else:
546
+ # globbing inside a zip in a private repo requires authentication
547
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
548
+ main_hop, *rest_hops = path.split("::")
549
+ fs, *_ = fsspec.get_fs_token_paths(path, storage_options=storage_options)
550
+ inner_path = main_hop.split("://")[-1]
551
+ if inner_path.strip("/") and not fs.isdir(inner_path):
552
+ raise FileNotFoundError(f"Directory doesn't exist: {path}")
553
+ paths = fs.listdir(inner_path, detail=False)
554
+ return [os.path.basename(path.rstrip("/")) for path in paths]
555
+
556
+
557
+ def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None):
558
+ """Extend `glob.glob` function to support remote files.
559
+
560
+ Args:
561
+ urlpath (`str`): URL path with shell-style wildcard patterns.
562
+ recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more
563
+ directories or subdirectories.
564
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
565
+
566
+ Returns:
567
+ `list` of `str`
568
+ """
569
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
570
+ if is_local_path(main_hop):
571
+ return glob.glob(main_hop, recursive=recursive)
572
+ else:
573
+ # globbing inside a zip in a private repo requires authentication
574
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
575
+ main_hop, *rest_hops = urlpath.split("::")
576
+ fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
577
+ # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
578
+ # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
579
+ # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
580
+ # - If there is "**" in the pattern, `fs.glob` must be called anyway.
581
+ inner_path = main_hop.split("://")[1]
582
+ globbed_paths = fs.glob(inner_path)
583
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
584
+ return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths]
585
+
586
+
587
+ def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs):
588
+ """Extend `os.walk` function to support remote files.
589
+
590
+ Args:
591
+ urlpath (`str`): URL root path.
592
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
593
+ **kwargs: Additional keyword arguments forwarded to the underlying filesystem.
594
+
595
+
596
+ Yields:
597
+ `tuple`: 3-tuple (dirpath, dirnames, filenames).
598
+ """
599
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
600
+ if is_local_path(main_hop):
601
+ yield from os.walk(main_hop, **kwargs)
602
+ else:
603
+ # walking inside a zip in a private repo requires authentication
604
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
605
+ main_hop, *rest_hops = urlpath.split("::")
606
+ fs, *_ = fsspec.get_fs_token_paths(urlpath, storage_options=storage_options)
607
+ inner_path = main_hop.split("://")[-1]
608
+ if inner_path.strip("/") and not fs.isdir(inner_path):
609
+ return []
610
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
611
+ for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs):
612
+ yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames
613
+
614
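The same chained-URL convention works for `xglob` and `xwalk`; a minimal sketch (hypothetical archive URL, import path assumed):

```py
from datasets.download.streaming_download_manager import xglob, xwalk

archive = "https://example.com/data/images.zip"  # hypothetical URL

# Glob PNG files at the top level of the remote ZIP.
png_files = xglob(f"zip://*.png::{archive}")

# Or walk the whole archive, yielding (dirpath, dirnames, filenames) like os.walk.
for dirpath, dirnames, filenames in xwalk(f"zip://::{archive}"):
    print(dirpath, filenames)
```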
+
615
+ class xPath(type(Path())):
616
+ """Extension of `pathlib.Path` to support both local paths and remote URLs."""
617
+
618
+ def __str__(self):
619
+ path_str = super().__str__()
620
+ main_hop, *rest_hops = path_str.split("::")
621
+ if is_local_path(main_hop):
622
+ return main_hop
623
+ path_as_posix = path_str.replace("\\", "/")
624
+ path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
625
+ path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
626
+ return path_as_posix
627
+
628
+ def exists(self, download_config: Optional[DownloadConfig] = None):
629
+ """Extend `pathlib.Path.exists` method to support both local and remote files.
630
+
631
+ Args:
632
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
633
+
634
+ Returns:
635
+ `bool`
636
+ """
637
+ return xexists(str(self), download_config=download_config)
638
+
639
+ def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
640
+ """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
641
+
642
+ Args:
643
+ pattern (`str`): Pattern that resulting paths must match.
644
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
645
+
646
+ Yields:
647
+ [`xPath`]
648
+ """
649
+ posix_path = self.as_posix()
650
+ main_hop, *rest_hops = posix_path.split("::")
651
+ if is_local_path(main_hop):
652
+ yield from Path(main_hop).glob(pattern)
653
+ else:
654
+ # globbing inside a zip in a private repo requires authentication
655
+ if rest_hops:
656
+ urlpath = rest_hops[0]
657
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
658
+ storage_options = {urlpath.split("://")[0]: storage_options}
659
+ posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
660
+ else:
661
+ storage_options = None
662
+ fs, *_ = fsspec.get_fs_token_paths(xjoin(posix_path, pattern), storage_options=storage_options)
663
+ # - If there's no "*" in the pattern, get_fs_token_paths() doesn't do any pattern matching
664
+ # so to be able to glob patterns like "[0-9]", we have to call `fs.glob`.
665
+ # - Also "*" in get_fs_token_paths() only matches files: we have to call `fs.glob` to match directories.
666
+ # - If there is "**" in the pattern, `fs.glob` must be called anyway.
667
+ globbed_paths = fs.glob(xjoin(main_hop, pattern))
668
+ for globbed_path in globbed_paths:
669
+ yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
670
+
671
+ def rglob(self, pattern, **kwargs):
672
+ """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
673
+
674
+ Args:
675
+ pattern (`str`): Pattern that resulting paths must match.
676
+
677
+ Yields:
678
+ [`xPath`]
679
+ """
680
+ return self.glob("**/" + pattern, **kwargs)
681
+
682
+ @property
683
+ def parent(self) -> "xPath":
684
+ """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
685
+
686
+ Returns:
687
+ [`xPath`]
688
+ """
689
+ return type(self)(xdirname(self.as_posix()))
690
+
691
+ @property
692
+ def name(self) -> str:
693
+ """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
694
+
695
+ Returns:
696
+ `str`
697
+ """
698
+ return PurePosixPath(self.as_posix().split("::")[0]).name
699
+
700
+ @property
701
+ def stem(self) -> str:
702
+ """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
703
+
704
+ Returns:
705
+ `str`
706
+ """
707
+ return PurePosixPath(self.as_posix().split("::")[0]).stem
708
+
709
+ @property
710
+ def suffix(self) -> str:
711
+ """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
712
+
713
+ Returns:
714
+ `str`
715
+ """
716
+ return PurePosixPath(self.as_posix().split("::")[0]).suffix
717
+
718
+ def open(self, *args, **kwargs):
719
+ """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
720
+
721
+ Args:
722
+ *args: Arguments passed to :func:`fsspec.open`.
723
+ **kwargs: Keyword arguments passed to :func:`fsspec.open`.
724
+
725
+ Returns:
726
+ `io.FileIO`: File-like object.
727
+ """
728
+ return xopen(str(self), *args, **kwargs)
729
+
730
+ def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
731
+ """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
732
+
733
+ Args:
734
+ *p (`tuple` of `str`): Other path components.
735
+
736
+ Returns:
737
+ [`xPath`]
738
+ """
739
+ return type(self)(xjoin(self.as_posix(), *p))
740
+
741
+ def __truediv__(self, p: str) -> "xPath":
742
+ return self.joinpath(p)
743
+
744
+ def with_suffix(self, suffix):
745
+ main_hop, *rest_hops = str(self).split("::")
746
+ if is_local_path(main_hop):
747
+ return type(self)(str(super().with_suffix(suffix)))
748
+ return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops))
749
+
750
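A small sketch of `xPath` on a chained remote URL (hypothetical URL, import path assumed): the pathlib-style accessors operate on the inner path, before the `::`, and `open` delegates to `xopen`.

```py
from datasets.download.streaming_download_manager import xPath

p = xPath("zip://folder/train.json.gz::https://example.com/data.zip")  # hypothetical URL
p.name    # "train.json.gz" -- taken from the inner path
p.suffix  # ".gz"

# parent / joinpath stay inside the archive,
# roughly "zip://folder/valid.json.gz::https://example.com/data.zip"
q = p.parent / "valid.json.gz"
with q.open("rb") as f:  # delegates to xopen
    data = f.read()
```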
+
751
+ def _as_str(path: Union[str, Path, xPath]):
752
+ return str(path) if isinstance(path, xPath) else str(xPath(str(path)))
753
+
754
+
755
+ def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
756
+ import gzip
757
+
758
+ if hasattr(filepath_or_buffer, "read"):
759
+ return gzip.open(filepath_or_buffer, *args, **kwargs)
760
+ else:
761
+ filepath_or_buffer = str(filepath_or_buffer)
762
+ return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
763
+
764
+
765
+ def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
766
+ import numpy as np
767
+
768
+ if hasattr(filepath_or_buffer, "read"):
769
+ return np.load(filepath_or_buffer, *args, **kwargs)
770
+ else:
771
+ filepath_or_buffer = str(filepath_or_buffer)
772
+ return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
773
+
774
+
775
+ def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
776
+ import pandas as pd
777
+
778
+ if hasattr(filepath_or_buffer, "read"):
779
+ return pd.read_csv(filepath_or_buffer, **kwargs)
780
+ else:
781
+ filepath_or_buffer = str(filepath_or_buffer)
782
+ if kwargs.get("compression", "infer") == "infer":
783
+ kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config)
784
+ return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
785
+
786
+
787
+ def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
788
+ import pandas as pd
789
+
790
+ if hasattr(filepath_or_buffer, "read"):
791
+ try:
792
+ return pd.read_excel(filepath_or_buffer, **kwargs)
793
+ except ValueError: # Cannot seek streaming HTTP file
794
+ return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs)
795
+ else:
796
+ filepath_or_buffer = str(filepath_or_buffer)
797
+ try:
798
+ return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
799
+ except ValueError: # Cannot seek streaming HTTP file
800
+ return pd.read_excel(
801
+ BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs
802
+ )
803
+
804
+
805
+ def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
806
+ import pyarrow.parquet as pq
807
+
808
+ if hasattr(filepath_or_buffer, "read"):
809
+ return pq.read_table(filepath_or_buffer, **kwargs)
810
+ else:
811
+ filepath_or_buffer = str(filepath_or_buffer)
812
+ return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs)
813
+
814
+
815
+ def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
816
+ import scipy.io as sio
817
+
818
+ if hasattr(filepath_or_buffer, "read"):
819
+ return sio.loadmat(filepath_or_buffer, **kwargs)
820
+ else:
821
+ return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
822
+
823
+
824
+ def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None):
825
+ """Extend `xml.etree.ElementTree.parse` function to support remote files.
826
+
827
+ Args:
828
+ source: File path or file object.
829
+ parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance.
830
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
831
+
832
+ Returns:
833
+ `xml.etree.ElementTree.Element`: Root element of the given source document.
834
+ """
835
+ if hasattr(source, "read"):
836
+ return ET.parse(source, parser=parser)
837
+ else:
838
+ with xopen(source, "rb", download_config=download_config) as f:
839
+ return ET.parse(f, parser=parser)
840
+
841
+
842
+ def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs):
843
+ """Extend `xml.dom.minidom.parse` function to support remote files.
844
+
845
+ Args:
846
+ filename_or_file (`str` or file): File path or file object.
847
+ download_config (`DownloadConfig`, *optional*): Download configuration; mainly used to pass `token` or `storage_options` to support different platforms and auth types.
848
+ **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`.
849
+
850
+ Returns:
851
+ :obj:`xml.dom.minidom.Document`: Parsed document.
852
+ """
853
+ if hasattr(filename_or_file, "read"):
854
+ return xml.dom.minidom.parse(filename_or_file, **kwargs)
855
+ else:
856
+ with xopen(filename_or_file, "rb", download_config=download_config) as f:
857
+ return xml.dom.minidom.parse(f, **kwargs)
858
+
859
+
860
+ class _IterableFromGenerator(Iterable):
861
+ """Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
862
+
863
+ def __init__(self, generator: Callable, *args, **kwargs):
864
+ self.generator = generator
865
+ self.args = args
866
+ self.kwargs = kwargs
867
+
868
+ def __iter__(self):
869
+ yield from self.generator(*self.args, **self.kwargs)
870
+
871
+
872
+ class ArchiveIterable(_IterableFromGenerator):
873
+ """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
874
+
875
+ @staticmethod
876
+ def _iter_tar(f):
877
+ stream = tarfile.open(fileobj=f, mode="r|*")
878
+ for tarinfo in stream:
879
+ file_path = tarinfo.name
880
+ if not tarinfo.isreg():
881
+ continue
882
+ if file_path is None:
883
+ continue
884
+ if os.path.basename(file_path).startswith((".", "__")):
885
+ # skipping hidden files
886
+ continue
887
+ file_obj = stream.extractfile(tarinfo)
888
+ yield file_path, file_obj
889
+ stream.members = []
890
+ del stream
891
+
892
+ @staticmethod
893
+ def _iter_zip(f):
894
+ zipf = zipfile.ZipFile(f)
895
+ for member in zipf.infolist():
896
+ file_path = member.filename
897
+ if member.is_dir():
898
+ continue
899
+ if file_path is None:
900
+ continue
901
+ if os.path.basename(file_path).startswith((".", "__")):
902
+ # skipping hidden files
903
+ continue
904
+ file_obj = zipf.open(member)
905
+ yield file_path, file_obj
906
+
907
+ @classmethod
908
+ def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
909
+ compression = _get_extraction_protocol_with_magic_number(f)
910
+ if compression == "zip":
911
+ yield from cls._iter_zip(f)
912
+ else:
913
+ yield from cls._iter_tar(f)
914
+
915
+ @classmethod
916
+ def _iter_from_urlpath(
917
+ cls, urlpath: str, download_config: Optional[DownloadConfig] = None
918
+ ) -> Generator[Tuple, None, None]:
919
+ compression = _get_extraction_protocol(urlpath, download_config=download_config)
920
+ # Set block_size=0 to get faster streaming
921
+ # (e.g. for hf:// and https:// it uses streaming Requests file-like instances)
922
+ with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f:
923
+ if compression == "zip":
924
+ yield from cls._iter_zip(f)
925
+ else:
926
+ yield from cls._iter_tar(f)
927
+
928
+ @classmethod
929
+ def from_buf(cls, fileobj) -> "ArchiveIterable":
930
+ return cls(cls._iter_from_fileobj, fileobj)
931
+
932
+ @classmethod
933
+ def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
934
+ return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
935
+
936
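A sketch of streaming a remote TAR/ZIP archive with `ArchiveIterable` (this is what `StreamingDownloadManager.iter_archive` below returns); the archive URL is hypothetical and the import path is assumed from this file.

```py
from datasets import DownloadConfig
from datasets.download.streaming_download_manager import ArchiveIterable

archive_url = "https://example.com/data/corpus.tar.gz"  # hypothetical URL
for path_in_archive, file_obj in ArchiveIterable.from_urlpath(archive_url, download_config=DownloadConfig()):
    # file_obj is a binary file-like object; hidden files ("."/"__" prefixes) are skipped.
    text = file_obj.read().decode("utf-8")
```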
+
937
+ class FilesIterable(_IterableFromGenerator):
938
+ """An iterable of paths from a list of directories or files"""
939
+
940
+ @classmethod
941
+ def _iter_from_urlpaths(
942
+ cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
943
+ ) -> Generator[str, None, None]:
944
+ if not isinstance(urlpaths, list):
945
+ urlpaths = [urlpaths]
946
+ for urlpath in urlpaths:
947
+ if xisfile(urlpath, download_config=download_config):
948
+ yield urlpath
949
+ elif xisdir(urlpath, download_config=download_config):
950
+ for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
951
+ # in-place modification to prune the search
952
+ dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
953
+ if xbasename(dirpath).startswith((".", "__")):
954
+ # skipping hidden directories
955
+ continue
956
+ for filename in sorted(filenames):
957
+ if filename.startswith((".", "__")):
958
+ # skipping hidden files
959
+ continue
960
+ yield xjoin(dirpath, filename)
961
+ else:
962
+ raise FileNotFoundError(urlpath)
963
+
964
+ @classmethod
965
+ def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
966
+ return cls(cls._iter_from_urlpaths, urlpaths, download_config)
967
+
968
+
969
+ class StreamingDownloadManager:
970
+ """
971
+ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
972
+ Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
973
+ data; rather, they return the path or URL that can be opened using the `xopen` function, which extends the
974
+ built-in `open` function to stream data from remote files.
975
+ """
976
+
977
+ is_streaming = True
978
+
979
+ def __init__(
980
+ self,
981
+ dataset_name: Optional[str] = None,
982
+ data_dir: Optional[str] = None,
983
+ download_config: Optional[DownloadConfig] = None,
984
+ base_path: Optional[str] = None,
985
+ ):
986
+ self._dataset_name = dataset_name
987
+ self._data_dir = data_dir
988
+ self._base_path = base_path or os.path.abspath(".")
989
+ self.download_config = download_config or DownloadConfig()
990
+
991
+ @property
992
+ def manual_dir(self):
993
+ return self._data_dir
994
+
995
+ def download(self, url_or_urls):
996
+ """Normalize URL(s) of files to stream data from.
997
+ This is the lazy version of `DownloadManager.download` for streaming.
998
+
999
+ Args:
1000
+ url_or_urls (`str` or `list` or `dict`):
1001
+ URL(s) of files to stream data from. Each url is a `str`.
1002
+
1003
+ Returns:
1004
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
1005
+
1006
+ Example:
1007
+
1008
+ ```py
1009
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
1010
+ ```
1011
+ """
1012
+ url_or_urls = map_nested(self._download, url_or_urls, map_tuple=True)
1013
+ return url_or_urls
1014
+
1015
+ def _download(self, urlpath: str) -> str:
1016
+ urlpath = str(urlpath)
1017
+ if is_relative_path(urlpath):
1018
+ # append the relative path to the base_path
1019
+ urlpath = url_or_path_join(self._base_path, urlpath)
1020
+ return urlpath
1021
+
1022
+ def extract(self, url_or_urls):
1023
+ """Add extraction protocol for given url(s) for streaming.
1024
+
1025
+ This is the lazy version of `DownloadManager.extract` for streaming.
1026
+
1027
+ Args:
1028
+ url_or_urls (`str` or `list` or `dict`):
1029
+ URL(s) of files to stream data from. Each url is a `str`.
1030
+
1031
+ Returns:
1032
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
1033
+
1034
+ Example:
1035
+
1036
+ ```py
1037
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
1038
+ >>> extracted_files = dl_manager.extract(downloaded_files)
1039
+ ```
1040
+ """
1041
+ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
1042
+ return urlpaths
1043
+
1044
+ def _extract(self, urlpath: str) -> str:
1045
+ urlpath = str(urlpath)
1046
+ protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
1047
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
1048
+ path = urlpath.split("::")[0]
1049
+ extension = _get_path_extension(path)
1050
+ if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
1051
+ raise NotImplementedError(
1052
+ f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
1053
+ f"Please use `dl_manager.iter_archive` instead.\n\n"
1054
+ f"Example usage:\n\n"
1055
+ f"\turl = dl_manager.download(url)\n"
1056
+ f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
1057
+ f"\tfor filename, file in tar_archive_iterator:\n"
1058
+ f"\t\t..."
1059
+ )
1060
+ if protocol is None:
1061
+ # no extraction
1062
+ return urlpath
1063
+ elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
1064
+ # there is one single file which is the uncompressed file
1065
+ inner_file = os.path.basename(urlpath.split("::")[0])
1066
+ inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
1067
+ return f"{protocol}://{inner_file}::{urlpath}"
1068
+ else:
1069
+ return f"{protocol}://::{urlpath}"
1070
+
1071
+ def download_and_extract(self, url_or_urls):
1072
+ """Prepare given `url_or_urls` for streaming (add extraction protocol).
1073
+
1074
+ This is the lazy version of `DownloadManager.download_and_extract` for streaming.
1075
+
1076
+ Is equivalent to:
1077
+
1078
+ ```
1079
+ urls = dl_manager.extract(dl_manager.download(url_or_urls))
1080
+ ```
1081
+
1082
+ Args:
1083
+ url_or_urls (`str` or `list` or `dict`):
1084
+ URL(s) to stream data from. Each url is a `str`.
1085
+
1086
+ Returns:
1087
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
1088
+ """
1089
+ return self.extract(self.download(url_or_urls))
1090
+
1091
+ def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
1092
+ """Iterate over files within an archive.
1093
+
1094
+ Args:
1095
+ urlpath_or_buf (`str` or `io.BufferedReader`):
1096
+ Archive path or archive binary file object.
1097
+
1098
+ Yields:
1099
+ `tuple[str, io.BufferedReader]`:
1100
+ 2-tuple (path_within_archive, file_object).
1101
+ File object is opened in binary mode.
1102
+
1103
+ Example:
1104
+
1105
+ ```py
1106
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
1107
+ >>> files = dl_manager.iter_archive(archive)
1108
+ ```
1109
+ """
1110
+
1111
+ if hasattr(urlpath_or_buf, "read"):
1112
+ return ArchiveIterable.from_buf(urlpath_or_buf)
1113
+ else:
1114
+ return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
1115
+
1116
+ def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
1117
+ """Iterate over files.
1118
+
1119
+ Args:
1120
+ urlpaths (`str` or `list` of `str`):
1121
+ Root paths.
1122
+
1123
+ Yields:
1124
+ str: File URL path.
1125
+
1126
+ Example:
1127
+
1128
+ ```py
1129
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
1130
+ >>> files = dl_manager.iter_files(files)
1131
+ ```
1132
+ """
1133
+ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
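Putting the pieces together, a sketch of this manager in streaming mode (hypothetical URLs; `download` only normalizes paths, `extract` only prepends the extraction protocol, and iteration stays lazy):

```py
from datasets.download.streaming_download_manager import StreamingDownloadManager, xopen

dl_manager = StreamingDownloadManager()

# Single-file compression: the inner file name is placed before the "::".
url = dl_manager.download("https://example.com/data/train.json.gz")
dl_manager.extract(url)
# -> roughly "gzip://train.json::https://example.com/data/train.json.gz"

# ZIP archives become "zip://::<archive-url>" and can then be enumerated lazily.
archive = dl_manager.download_and_extract("https://example.com/data/images.zip")
for file_url in dl_manager.iter_files(archive):
    with xopen(file_url, "rb", download_config=dl_manager.download_config) as f:
        ...
```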
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__init__.py ADDED
@@ -0,0 +1,131 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ruff: noqa
16
+
17
+ from typing import Dict, List, Optional, Type
18
+
19
+ from .. import config
20
+ from ..utils import logging
21
+ from .formatting import (
22
+ ArrowFormatter,
23
+ CustomFormatter,
24
+ Formatter,
25
+ PandasFormatter,
26
+ PythonFormatter,
27
+ TensorFormatter,
28
+ format_table,
29
+ query_table,
30
+ )
31
+ from .np_formatter import NumpyFormatter
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+ _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
37
+ _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
38
+ _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
39
+
40
+
41
+ def _register_formatter(
42
+ formatter_cls: type,
43
+ format_type: Optional[str],
44
+ aliases: Optional[List[str]] = None,
45
+ ):
46
+ """
47
+ Register a Formatter object using a name and optional aliases.
48
+ This function must be used on a Formatter class.
49
+ """
50
+ aliases = aliases if aliases is not None else []
51
+ if format_type in _FORMAT_TYPES:
52
+ logger.warning(
53
+ f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
54
+ )
55
+ _FORMAT_TYPES[format_type] = formatter_cls
56
+ for alias in set(aliases + [format_type]):
57
+ if alias in _FORMAT_TYPES_ALIASES:
58
+ logger.warning(
59
+ f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
60
+ )
61
+ _FORMAT_TYPES_ALIASES[alias] = format_type
62
+
63
+
64
+ def _register_unavailable_formatter(
65
+ unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
66
+ ):
67
+ """
68
+ Register an unavailable Formatter object using a name and optional aliases.
69
+ This function must be used on an Exception object that is raised when trying to get the unavailable formatter.
70
+ """
71
+ aliases = aliases if aliases is not None else []
72
+ for alias in set(aliases + [format_type]):
73
+ _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
74
+
75
+
76
+ # Here we define all the available formatting functions that can be used by `Dataset.set_format`
77
+ _register_formatter(PythonFormatter, None, aliases=["python"])
78
+ _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
79
+ _register_formatter(NumpyFormatter, "numpy", aliases=["np"])
80
+ _register_formatter(PandasFormatter, "pandas", aliases=["pd"])
81
+ _register_formatter(CustomFormatter, "custom")
82
+
83
+ if config.TORCH_AVAILABLE:
84
+ from .torch_formatter import TorchFormatter
85
+
86
+ _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
87
+ else:
88
+ _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
89
+ _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
90
+
91
+ if config.TF_AVAILABLE:
92
+ from .tf_formatter import TFFormatter
93
+
94
+ _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
95
+ else:
96
+ _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
97
+ _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
98
+
99
+ if config.JAX_AVAILABLE:
100
+ from .jax_formatter import JaxFormatter
101
+
102
+ _register_formatter(JaxFormatter, "jax", aliases=[])
103
+ else:
104
+ _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
105
+ _register_unavailable_formatter(_jax_error, "jax", aliases=[])
106
+
107
+
108
+ def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
109
+ """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change."""
110
+ if format_type in _FORMAT_TYPES_ALIASES:
111
+ return _FORMAT_TYPES_ALIASES[format_type]
112
+ else:
113
+ return format_type
114
+
115
+
116
+ def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
117
+ """
118
+ Factory function to get a Formatter given its type name and keyword arguments.
119
+ A formatter is an object that extracts and formats data from a pyarrow Table.
120
+ It defines the formatting for rows, columns and batches.
121
+ If the formatter for a given type name doesn't exist or is not available, an error is raised.
122
+ """
123
+ format_type = get_format_type_from_alias(format_type)
124
+ if format_type in _FORMAT_TYPES:
125
+ return _FORMAT_TYPES[format_type](**format_kwargs)
126
+ if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
127
+ raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
128
+ else:
129
+ raise ValueError(
130
+ f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
131
+ )
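A quick sketch of this registry in use: `get_formatter` resolves aliases via `get_format_type_from_alias` and re-raises the stored error when an optional backend is missing.

```py
from datasets.formatting import get_format_type_from_alias, get_formatter

get_format_type_from_alias("np")     # -> "numpy"
formatter = get_formatter("pandas")  # PandasFormatter instance
formatter = get_formatter(None)      # PythonFormatter, the default (alias "python")

# If e.g. PyTorch is not installed, the registered ValueError is raised instead:
#   get_formatter("torch")  ->  ValueError("PyTorch needs to be installed ...")
```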
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.86 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/formatting.cpython-310.pyc ADDED
Binary file (26.3 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/jax_formatter.cpython-310.pyc ADDED
Binary file (5.5 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/np_formatter.cpython-310.pyc ADDED
Binary file (3.88 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/tf_formatter.cpython-310.pyc ADDED
Binary file (4.07 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc ADDED
Binary file (3.87 kB)
env-llmeval/lib/python3.10/site-packages/datasets/formatting/formatting.py ADDED
@@ -0,0 +1,649 @@
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from collections.abc import Mapping, MutableMapping
16
+ from functools import partial
17
+
18
+ # Lint as: python3
19
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
20
+
21
+ import numpy as np
22
+ import pandas as pd
23
+ import pyarrow as pa
24
+ from packaging import version
25
+
26
+ from .. import config
27
+ from ..features import Features
28
+ from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
29
+ from ..table import Table
30
+ from ..utils.py_utils import no_op_if_value_is_null
31
+
32
+
33
+ T = TypeVar("T")
34
+
35
+ RowFormat = TypeVar("RowFormat")
36
+ ColumnFormat = TypeVar("ColumnFormat")
37
+ BatchFormat = TypeVar("BatchFormat")
38
+
39
+
40
+ def _is_range_contiguous(key: range) -> bool:
41
+ return key.step == 1 and key.stop >= key.start
42
+
43
+
44
+ def _raise_bad_key_type(key: Any):
45
+ raise TypeError(
46
+ f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
47
+ )
48
+
49
+
50
+ def _query_table_with_indices_mapping(
51
+ table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
52
+ ) -> pa.Table:
53
+ """
54
+ Query a pyarrow Table to extract the subtable that corresponds to the given key.
55
+ The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
56
+ account a shuffling or an indices selection for example.
57
+ The indices table must contain one column named "indices" of type uint64.
58
+ """
59
+ if isinstance(key, int):
60
+ key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
61
+ return _query_table(table, key)
62
+ if isinstance(key, slice):
63
+ key = range(*key.indices(indices.num_rows))
64
+ if isinstance(key, range):
65
+ if _is_range_contiguous(key) and key.start >= 0:
66
+ return _query_table(
67
+ table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
68
+ )
69
+ else:
70
+ pass # treat as an iterable
71
+ if isinstance(key, str):
72
+ table = table.select([key])
73
+ return _query_table(table, indices.column(0).to_pylist())
74
+ if isinstance(key, Iterable):
75
+ return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])
76
+
77
+ _raise_bad_key_type(key)
78
+
79
+
80
+ def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
81
+ """
82
+ Query a pyarrow Table to extract the subtable that corresponds to the given key.
83
+ """
84
+ if isinstance(key, int):
85
+ return table.fast_slice(key % table.num_rows, 1)
86
+ if isinstance(key, slice):
87
+ key = range(*key.indices(table.num_rows))
88
+ if isinstance(key, range):
89
+ if _is_range_contiguous(key) and key.start >= 0:
90
+ return table.fast_slice(key.start, key.stop - key.start)
91
+ else:
92
+ pass # treat as an iterable
93
+ if isinstance(key, str):
94
+ return table.table.drop([column for column in table.column_names if column != key])
95
+ if isinstance(key, Iterable):
96
+ key = np.fromiter(key, np.int64)
97
+ if len(key) == 0:
98
+ return table.table.slice(0, 0)
99
+ # don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
100
+ return table.fast_gather(key % table.num_rows)
101
+
102
+ _raise_bad_key_type(key)
103
+
104
+
105
+ def _is_array_with_nulls(pa_array: pa.Array) -> bool:
106
+ return pa_array.null_count > 0
107
+
108
+
109
+ class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
110
+ """
111
+ Arrow extractors are used to extract data from pyarrow tables.
112
+ It makes it possible to extract rows, columns and batches.
113
+ These three extraction types have to be implemented.
114
+ """
115
+
116
+ def extract_row(self, pa_table: pa.Table) -> RowFormat:
117
+ raise NotImplementedError
118
+
119
+ def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
120
+ raise NotImplementedError
121
+
122
+ def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
123
+ raise NotImplementedError
124
+
125
+
126
+ def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
127
+ """Return the first element of a batch (dict) as a row (dict)"""
128
+ return {key: array[0] for key, array in py_dict.items()}
129
+
130
+
131
+ class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
132
+ def extract_row(self, pa_table: pa.Table) -> pa.Table:
133
+ return pa_table
134
+
135
+ def extract_column(self, pa_table: pa.Table) -> pa.Array:
136
+ return pa_table.column(0)
137
+
138
+ def extract_batch(self, pa_table: pa.Table) -> pa.Table:
139
+ return pa_table
140
+
141
+
142
+ class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
143
+ def extract_row(self, pa_table: pa.Table) -> dict:
144
+ return _unnest(pa_table.to_pydict())
145
+
146
+ def extract_column(self, pa_table: pa.Table) -> list:
147
+ return pa_table.column(0).to_pylist()
148
+
149
+ def extract_batch(self, pa_table: pa.Table) -> dict:
150
+ return pa_table.to_pydict()
151
+
152
+
153
+ class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
154
+ def __init__(self, **np_array_kwargs):
155
+ self.np_array_kwargs = np_array_kwargs
156
+
157
+ def extract_row(self, pa_table: pa.Table) -> dict:
158
+ return _unnest(self.extract_batch(pa_table))
159
+
160
+ def extract_column(self, pa_table: pa.Table) -> np.ndarray:
161
+ return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
162
+
163
+ def extract_batch(self, pa_table: pa.Table) -> dict:
164
+ return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
165
+
166
+ def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
167
+ if isinstance(pa_array, pa.ChunkedArray):
168
+ if isinstance(pa_array.type, _ArrayXDExtensionType):
169
+ # don't call to_pylist() to preserve dtype of the fixed-size array
170
+ zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
171
+ array: List = [
172
+ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
173
+ ]
174
+ else:
175
+ zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
176
+ not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
177
+ )
178
+ array: List = [
179
+ row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
180
+ ]
181
+ else:
182
+ if isinstance(pa_array.type, _ArrayXDExtensionType):
183
+ # don't call to_pylist() to preserve dtype of the fixed-size array
184
+ zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
185
+ array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
186
+ else:
187
+ zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
188
+ array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
189
+ if len(array) > 0:
190
+ if any(
191
+ (isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
192
+ or (isinstance(x, float) and np.isnan(x))
193
+ for x in array
194
+ ):
195
+ return np.array(array, copy=False, dtype=object)
196
+ return np.array(array, copy=False)
197
+
198
+
199
+ class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
200
+ def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
201
+ return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
202
+
203
+ def extract_column(self, pa_table: pa.Table) -> pd.Series:
204
+ return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
205
+
206
+ def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
207
+ return pa_table.to_pandas(types_mapper=pandas_types_mapper)
208
+
209
+
210
+ class PythonFeaturesDecoder:
211
+ def __init__(self, features: Optional[Features]):
212
+ self.features = features
213
+
214
+ def decode_row(self, row: dict) -> dict:
215
+ return self.features.decode_example(row) if self.features else row
216
+
217
+ def decode_column(self, column: list, column_name: str) -> list:
218
+ return self.features.decode_column(column, column_name) if self.features else column
219
+
220
+ def decode_batch(self, batch: dict) -> dict:
221
+ return self.features.decode_batch(batch) if self.features else batch
222
+
223
+
224
+ class PandasFeaturesDecoder:
225
+ def __init__(self, features: Optional[Features]):
226
+ self.features = features
227
+
228
+ def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
229
+ decode = (
230
+ {
231
+ column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
232
+ for column_name, feature in self.features.items()
233
+ if self.features._column_requires_decoding[column_name]
234
+ }
235
+ if self.features
236
+ else {}
237
+ )
238
+ if decode:
239
+ row[list(decode.keys())] = row.transform(decode)
240
+ return row
241
+
242
+ def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
243
+ decode = (
244
+ no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
245
+ if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
246
+ else None
247
+ )
248
+ if decode:
249
+ column = column.transform(decode)
250
+ return column
251
+
252
+ def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
253
+ return self.decode_row(batch)
254
+
255
+
256
+ class LazyDict(MutableMapping):
257
+ """A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
258
+
259
+ def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
260
+ self.pa_table = pa_table
261
+ self.formatter = formatter
262
+
263
+ self.data = {key: None for key in pa_table.column_names}
264
+ self.keys_to_format = set(self.data.keys())
265
+
266
+ def __len__(self):
267
+ return len(self.data)
268
+
269
+ def __getitem__(self, key):
270
+ value = self.data[key]
271
+ if key in self.keys_to_format:
272
+ value = self.format(key)
273
+ self.data[key] = value
274
+ self.keys_to_format.remove(key)
275
+ return value
276
+
277
+ def __setitem__(self, key, value):
278
+ if key in self.keys_to_format:
279
+ self.keys_to_format.remove(key)
280
+ self.data[key] = value
281
+
282
+ def __delitem__(self, key) -> None:
283
+ if key in self.keys_to_format:
284
+ self.keys_to_format.remove(key)
285
+ del self.data[key]
286
+
287
+ def __iter__(self):
288
+ return iter(self.data)
289
+
290
+ def __contains__(self, key):
291
+ return key in self.data
292
+
293
+ def __repr__(self):
294
+ self._format_all()
295
+ return repr(self.data)
296
+
297
+ if config.PY_VERSION >= version.parse("3.9"):
298
+ # merging with the union ("|") operator is supported in Python 3.9+
299
+
300
+ def __or__(self, other):
301
+ if isinstance(other, LazyDict):
302
+ inst = self.copy()
303
+ other = other.copy()
304
+ other._format_all()
305
+ inst.keys_to_format -= other.data.keys()
306
+ inst.data = inst.data | other.data
307
+ return inst
308
+ if isinstance(other, dict):
309
+ inst = self.copy()
310
+ inst.keys_to_format -= other.keys()
311
+ inst.data = inst.data | other
312
+ return inst
313
+ return NotImplemented
314
+
315
+ def __ror__(self, other):
316
+ if isinstance(other, LazyDict):
317
+ inst = self.copy()
318
+ other = other.copy()
319
+ other._format_all()
320
+ inst.keys_to_format -= other.data.keys()
321
+ inst.data = other.data | inst.data
322
+ return inst
323
+ if isinstance(other, dict):
324
+ inst = self.copy()
325
+ inst.keys_to_format -= other.keys()
326
+ inst.data = other | inst.data
327
+ return inst
328
+ return NotImplemented
329
+
330
+ def __ior__(self, other):
331
+ if isinstance(other, LazyDict):
332
+ other = other.copy()
333
+ other._format_all()
334
+ self.keys_to_format -= other.data.keys()
335
+ self.data |= other.data
336
+ else:
337
+ self.keys_to_format -= other.keys()
338
+ self.data |= other
339
+ return self
340
+
341
+ def __copy__(self):
342
+ # Identical to `UserDict.__copy__`
343
+ inst = self.__class__.__new__(self.__class__)
344
+ inst.__dict__.update(self.__dict__)
345
+ # Create a copy and avoid triggering descriptors
346
+ inst.__dict__["data"] = self.__dict__["data"].copy()
347
+ inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
348
+ return inst
349
+
350
+ def copy(self):
351
+ import copy
352
+
353
+ return copy.copy(self)
354
+
355
+ @classmethod
356
+ def fromkeys(cls, iterable, value=None):
357
+ raise NotImplementedError
358
+
359
+ def format(self, key):
360
+ raise NotImplementedError
361
+
362
+ def _format_all(self):
363
+ for key in self.keys_to_format:
364
+ self.data[key] = self.format(key)
365
+ self.keys_to_format.clear()
366
+
367
+
368
+ class LazyRow(LazyDict):
369
+ def format(self, key):
370
+ return self.formatter.format_column(self.pa_table.select([key]))[0]
371
+
372
+
373
+ class LazyBatch(LazyDict):
374
+ def format(self, key):
375
+ return self.formatter.format_column(self.pa_table.select([key]))
376
+
377
+
378
+ class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
379
+ """
380
+ A formatter is an object that extracts and formats data from pyarrow tables.
381
+ It defines the formatting for rows, columns and batches.
382
+ """
383
+
384
+ simple_arrow_extractor = SimpleArrowExtractor
385
+ python_arrow_extractor = PythonArrowExtractor
386
+ numpy_arrow_extractor = NumpyArrowExtractor
387
+ pandas_arrow_extractor = PandasArrowExtractor
388
+
389
+ def __init__(self, features: Optional[Features] = None):
390
+ self.features = features
391
+ self.python_features_decoder = PythonFeaturesDecoder(self.features)
392
+ self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
393
+
394
+ def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
395
+ if query_type == "row":
396
+ return self.format_row(pa_table)
397
+ elif query_type == "column":
398
+ return self.format_column(pa_table)
399
+ elif query_type == "batch":
400
+ return self.format_batch(pa_table)
401
+
402
+ def format_row(self, pa_table: pa.Table) -> RowFormat:
403
+ raise NotImplementedError
404
+
405
+ def format_column(self, pa_table: pa.Table) -> ColumnFormat:
406
+ raise NotImplementedError
407
+
408
+ def format_batch(self, pa_table: pa.Table) -> BatchFormat:
409
+ raise NotImplementedError
410
+
411
+
412
+ class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
413
+ def recursive_tensorize(self, data_struct: dict):
414
+ raise NotImplementedError
415
+
416
+
417
+ class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]):
418
+ def format_row(self, pa_table: pa.Table) -> pa.Table:
419
+ return self.simple_arrow_extractor().extract_row(pa_table)
420
+
421
+ def format_column(self, pa_table: pa.Table) -> pa.Array:
422
+ return self.simple_arrow_extractor().extract_column(pa_table)
423
+
424
+ def format_batch(self, pa_table: pa.Table) -> pa.Table:
425
+ return self.simple_arrow_extractor().extract_batch(pa_table)
426
+
427
+
428
+ class PythonFormatter(Formatter[Mapping, list, Mapping]):
429
+ def __init__(self, features=None, lazy=False):
430
+ super().__init__(features)
431
+ self.lazy = lazy
432
+
433
+ def format_row(self, pa_table: pa.Table) -> Mapping:
434
+ if self.lazy:
435
+ return LazyRow(pa_table, self)
436
+ row = self.python_arrow_extractor().extract_row(pa_table)
437
+ row = self.python_features_decoder.decode_row(row)
438
+ return row
439
+
440
+ def format_column(self, pa_table: pa.Table) -> list:
441
+ column = self.python_arrow_extractor().extract_column(pa_table)
442
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
443
+ return column
444
+
445
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
446
+ if self.lazy:
447
+ return LazyBatch(pa_table, self)
448
+ batch = self.python_arrow_extractor().extract_batch(pa_table)
449
+ batch = self.python_features_decoder.decode_batch(batch)
450
+ return batch
451
+
452
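A sketch of the lazy path: with `lazy=True`, `format_row` returns a `LazyRow` that only formats a column when it is first accessed.

```py
import pyarrow as pa
from datasets.formatting import PythonFormatter

pa_table = pa.table({"id": [0], "text": ["hello"]})
row = PythonFormatter(lazy=True).format_row(pa_table)

type(row).__name__  # "LazyRow"
row.keys_to_format  # {"id", "text"} -- nothing decoded yet
row["text"]         # "hello"        -- formatted on first access
row.keys_to_format  # {"id"}
```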
+
453
+ class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]):
454
+ def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
455
+ row = self.pandas_arrow_extractor().extract_row(pa_table)
456
+ row = self.pandas_features_decoder.decode_row(row)
457
+ return row
458
+
459
+ def format_column(self, pa_table: pa.Table) -> pd.Series:
460
+ column = self.pandas_arrow_extractor().extract_column(pa_table)
461
+ column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
462
+ return column
463
+
464
+ def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
465
+ row = self.pandas_arrow_extractor().extract_batch(pa_table)
466
+ row = self.pandas_features_decoder.decode_batch(row)
467
+ return row
468
+
469
+
470
+ class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
471
+ """
472
+ A user-defined custom formatter function defined by a ``transform``.
473
+ The transform must take as input a batch of data extracted from an arrow table using the python extractor,
474
+ and return a batch.
475
+ If the output batch is not a dict, then output_all_columns won't work.
476
+ If the output batch has several fields, then querying a single column won't work since we don't know which field
477
+ to return.
478
+ """
479
+
480
+ def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs):
481
+ super().__init__(features=features)
482
+ self.transform = transform
483
+
484
+ def format_row(self, pa_table: pa.Table) -> dict:
485
+ formatted_batch = self.format_batch(pa_table)
486
+ try:
487
+ return _unnest(formatted_batch)
488
+ except Exception as exc:
489
+ raise TypeError(
490
+ f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
491
+ ) from exc
492
+
493
+ def format_column(self, pa_table: pa.Table) -> ColumnFormat:
494
+ formatted_batch = self.format_batch(pa_table)
495
+ if hasattr(formatted_batch, "keys"):
496
+ if len(formatted_batch.keys()) > 1:
497
+ raise TypeError(
498
+ "Tried to query a column but the custom formatting function returns too many columns. "
499
+ f"Only one column was expected but got columns {list(formatted_batch.keys())}."
500
+ )
501
+ else:
502
+ raise TypeError(
503
+ f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
504
+ )
505
+ try:
506
+ return formatted_batch[pa_table.column_names[0]]
507
+ except Exception as exc:
508
+ raise TypeError(
509
+ f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
510
+ ) from exc
511
+
512
+ def format_batch(self, pa_table: pa.Table) -> dict:
513
+ batch = self.python_arrow_extractor().extract_batch(pa_table)
514
+ batch = self.python_features_decoder.decode_batch(batch)
515
+ return self.transform(batch)
516
+
517
+
518
+ def _check_valid_column_key(key: str, columns: List[str]) -> None:
519
+ if key not in columns:
520
+ raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
521
+
522
+
523
+ def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
524
+ if isinstance(key, int):
525
+ if (key < 0 and key + size < 0) or (key >= size):
526
+ raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
527
+ return
528
+ elif isinstance(key, slice):
529
+ pass
530
+ elif isinstance(key, range):
531
+ if len(key) > 0:
532
+ _check_valid_index_key(max(key), size=size)
533
+ _check_valid_index_key(min(key), size=size)
534
+ elif isinstance(key, Iterable):
535
+ if len(key) > 0:
536
+ _check_valid_index_key(int(max(key)), size=size)
537
+ _check_valid_index_key(int(min(key)), size=size)
538
+ else:
539
+ _raise_bad_key_type(key)
540
+
541
+
542
+ def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
543
+ if isinstance(key, int):
544
+ return "row"
545
+ elif isinstance(key, str):
546
+ return "column"
547
+ elif isinstance(key, (slice, range, Iterable)):
548
+ return "batch"
549
+ _raise_bad_key_type(key)
550
+
551
+
552
+ def query_table(
553
+ table: Table,
554
+ key: Union[int, slice, range, str, Iterable],
555
+ indices: Optional[Table] = None,
556
+ ) -> pa.Table:
557
+ """
558
+ Query a Table to extract the subtable that corresponds to the given key.
559
+
560
+ Args:
561
+ table (``datasets.table.Table``): The input Table to query from
562
+ key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
563
+ - an integer i: the subtable containing only the i-th row
564
+ - a slice [i:j:k]: the subtable containing the rows that correspond to this slice
565
+ - a range(i, j, k): the subtable containing the rows that correspond to this range
566
+ - a string c: the subtable containing all the rows but only the column c
567
+ - an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
568
+ indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
569
+ The indices table must contain one column named "indices" of type uint64.
570
+ This is used in case of shuffling or rows selection.
571
+
572
+
573
+ Returns:
574
+ ``pyarrow.Table``: the result of the query on the input table
575
+ """
576
+ # Check if key is valid
577
+ if not isinstance(key, (int, slice, range, str, Iterable)):
578
+ _raise_bad_key_type(key)
579
+ if isinstance(key, str):
580
+ _check_valid_column_key(key, table.column_names)
581
+ else:
582
+ size = indices.num_rows if indices is not None else table.num_rows
583
+ _check_valid_index_key(key, size)
584
+ # Query the main table
585
+ if indices is None:
586
+ pa_subtable = _query_table(table, key)
587
+ else:
588
+ pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
589
+ return pa_subtable
590
+
591
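A sketch of `query_table` with the different key types, using an in-memory table (`InMemoryTable.from_pydict` is assumed to be the concrete `datasets.table.Table` used here):

```py
from datasets.formatting import query_table
from datasets.table import InMemoryTable

table = InMemoryTable.from_pydict({"id": [0, 1, 2], "text": ["a", "b", "c"]})

query_table(table, 1)            # pa.Table with the single row id == 1
query_table(table, slice(0, 2))  # rows 0 and 1
query_table(table, "text")       # all rows, but only the "text" column
query_table(table, [2, 0])       # rows 2 and 0, gathered in that order
```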
+
592
+ def format_table(
593
+ table: Table,
594
+ key: Union[int, slice, range, str, Iterable],
595
+ formatter: Formatter,
596
+ format_columns: Optional[list] = None,
597
+ output_all_columns=False,
598
+ ):
599
+ """
600
+ Format a Table depending on the key that was used and a Formatter object.
601
+
602
+ Args:
603
+ table (``datasets.table.Table``): The input Table to format
604
+ key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
605
+ the table as either a row, a column or a batch.
606
+ formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
607
+ PythonFormatter, NumpyFormatter, etc.
608
+ format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
609
+ given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
610
+ output_all_columns (:obj:`bool`, defaults to False): If True, the formatted output is completed using the columns
611
+ that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.
612
+
613
+
614
+ Returns:
615
+ A row, column or batch formatted object defined by the Formatter:
616
+ - the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
617
+ - the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
618
+ - the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
619
+ - the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
620
+ - the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
621
+ """
622
+ if isinstance(table, Table):
623
+ pa_table = table.table
624
+ else:
625
+ pa_table = table
626
+ query_type = key_to_query_type(key)
627
+ python_formatter = PythonFormatter(features=formatter.features)
628
+ if format_columns is None:
629
+ return formatter(pa_table, query_type=query_type)
630
+ elif query_type == "column":
631
+ if key in format_columns:
632
+ return formatter(pa_table, query_type)
633
+ else:
634
+ return python_formatter(pa_table, query_type=query_type)
635
+ else:
636
+ pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
637
+ formatted_output = formatter(pa_table_to_format, query_type=query_type)
638
+ if output_all_columns:
639
+ if isinstance(formatted_output, MutableMapping):
640
+ pa_table_with_remaining_columns = pa_table.drop(
641
+ col for col in pa_table.column_names if col in format_columns
642
+ )
643
+ remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
644
+ formatted_output.update(remaining_columns_dict)
645
+ else:
646
+ raise TypeError(
647
+ f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
648
+ )
649
+ return formatted_output
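As a rough illustration of how the two helpers above compose (a sketch, not a definitive recipe: it assumes query_table, format_table and PythonFormatter are re-exported by datasets.formatting, otherwise import them from datasets.formatting.formatting):

from datasets.formatting import PythonFormatter, format_table, query_table
from datasets.table import InMemoryTable

table = InMemoryTable.from_pydict({"a": [1, 2, 3], "b": ["x", "y", "z"]})
pa_subtable = query_table(table, key=0)                                 # pyarrow.Table with only row 0
row = format_table(pa_subtable, key=0, formatter=PythonFormatter())     # {'a': 1, 'b': 'x'}
pa_subtable = query_table(table, key="a")                               # only column "a"
col = format_table(pa_subtable, key="a", formatter=PythonFormatter())   # [1, 2, 3]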
env-llmeval/lib/python3.10/site-packages/datasets/formatting/jax_formatter.py ADDED
@@ -0,0 +1,160 @@
1
+ # Copyright 2021 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING, Dict, Optional
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.logging import get_logger
25
+ from ..utils.py_utils import map_nested
26
+ from .formatting import TensorFormatter
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ import jax
31
+ import jaxlib
32
+
33
+ logger = get_logger()
34
+
35
+ DEVICE_MAPPING: Optional[dict] = None
36
+
37
+
38
+ class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
39
+ def __init__(self, features=None, device=None, **jnp_array_kwargs):
40
+ super().__init__(features=features)
41
+ import jax
42
+ from jaxlib.xla_client import Device
43
+
44
+ if isinstance(device, Device):
45
+ raise ValueError(
46
+ f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
47
+ "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
48
+ "the device with `str()` to get its string identifier that will be internally mapped "
49
+ "to the actual `jaxlib.xla_extension.Device`."
50
+ )
51
+ self.device = device if isinstance(device, str) else str(jax.devices()[0])
52
+ # use a global variable since `jaxlib.xla_extension.Device` is not serializable
53
+ # with either `pickle` or `dill`
54
+ global DEVICE_MAPPING
55
+ if DEVICE_MAPPING is None:
56
+ DEVICE_MAPPING = self._map_devices_to_str()
57
+ if self.device not in list(DEVICE_MAPPING.keys()):
58
+ logger.warning(
59
+ f"Device with string identifier {self.device} not listed among the available "
60
+ f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
61
+ f"device: {str(jax.devices()[0])}."
62
+ )
63
+ self.device = str(jax.devices()[0])
64
+ self.jnp_array_kwargs = jnp_array_kwargs
65
+
66
+ @staticmethod
67
+ def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
68
+ import jax
69
+
70
+ return {str(device): device for device in jax.devices()}
71
+
72
+ def _consolidate(self, column):
73
+ import jax
74
+ import jax.numpy as jnp
75
+
76
+ if isinstance(column, list) and column:
77
+ if all(
78
+ isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
79
+ ):
80
+ return jnp.stack(column, axis=0)
81
+ return column
82
+
83
+ def _tensorize(self, value):
84
+ import jax
85
+ import jax.numpy as jnp
86
+
87
+ if isinstance(value, (str, bytes, type(None))):
88
+ return value
89
+ elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
90
+ return value.tolist()
91
+
92
+ default_dtype = {}
93
+
94
+ if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
95
+ # the default int precision depends on the jax config
96
+ # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
97
+ if jax.config.jax_enable_x64:
98
+ default_dtype = {"dtype": jnp.int64}
99
+ else:
100
+ default_dtype = {"dtype": jnp.int32}
101
+ elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
102
+ default_dtype = {"dtype": jnp.float32}
103
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules:
104
+ import PIL.Image
105
+
106
+ if isinstance(value, PIL.Image.Image):
107
+ value = np.asarray(value)
108
+
109
+ # use a global variable since `jaxlib.xla_extension.Device` is not serializable
111
+ # with either `pickle` or `dill`
111
+ global DEVICE_MAPPING
112
+ if DEVICE_MAPPING is None:
113
+ DEVICE_MAPPING = self._map_devices_to_str()
114
+
115
+ with jax.default_device(DEVICE_MAPPING[self.device]):
116
+ # calling jnp.array on a np.ndarray does copy the data
117
+ # see https://github.com/google/jax/issues/4486
118
+ return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
119
+
120
+ def _recursive_tensorize(self, data_struct):
121
+ import jax
122
+
123
+ # support for torch, tf, jax etc.
124
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
125
+ import torch
126
+
127
+ if isinstance(data_struct, torch.Tensor):
128
+ return self._tensorize(data_struct.detach().cpu().numpy()[()])
129
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
130
+ data_struct = data_struct.__array__()
131
+ # support for nested types like struct of list of struct
132
+ if isinstance(data_struct, np.ndarray):
133
+ if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
134
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
135
+ elif isinstance(data_struct, (list, tuple)):
136
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
137
+ return self._tensorize(data_struct)
138
+
139
+ def recursive_tensorize(self, data_struct: dict):
140
+ return map_nested(self._recursive_tensorize, data_struct, map_list=False)
141
+
142
+ def format_row(self, pa_table: pa.Table) -> Mapping:
143
+ row = self.numpy_arrow_extractor().extract_row(pa_table)
144
+ row = self.python_features_decoder.decode_row(row)
145
+ return self.recursive_tensorize(row)
146
+
147
+ def format_column(self, pa_table: pa.Table) -> "jax.Array":
148
+ column = self.numpy_arrow_extractor().extract_column(pa_table)
149
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
150
+ column = self.recursive_tensorize(column)
151
+ column = self._consolidate(column)
152
+ return column
153
+
154
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
155
+ batch = self.numpy_arrow_extractor().extract_batch(pa_table)
156
+ batch = self.python_features_decoder.decode_batch(batch)
157
+ batch = self.recursive_tensorize(batch)
158
+ for column_name in batch:
159
+ batch[column_name] = self._consolidate(batch[column_name])
160
+ return batch
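The formatter above is normally selected through the formatting API rather than instantiated directly. A minimal sketch, assuming a datasets release where the "jax" format type and its device keyword are forwarded to JaxFormatter:

import jax
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax", device=str(jax.devices()[0]))  # device must be a string identifier
print(type(ds[0]["x"]))  # a jax.Array placed on the requested device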
env-llmeval/lib/python3.10/site-packages/datasets/formatting/np_formatter.py ADDED
@@ -0,0 +1,106 @@
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import sys
16
+ from collections.abc import Mapping
17
+
18
+ import numpy as np
19
+ import pyarrow as pa
20
+
21
+ from .. import config
22
+ from ..utils.py_utils import map_nested
23
+ from .formatting import TensorFormatter
24
+
25
+
26
+ class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
27
+ def __init__(self, features=None, **np_array_kwargs):
28
+ super().__init__(features=features)
29
+ self.np_array_kwargs = np_array_kwargs
30
+
31
+ def _consolidate(self, column):
32
+ if isinstance(column, list):
33
+ if column and all(
34
+ isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
35
+ ):
36
+ return np.stack(column)
37
+ else:
38
+ # don't use np.array(column, dtype=object)
39
+ # since it fails in certain cases
40
+ # see https://stackoverflow.com/q/51005699
41
+ out = np.empty(len(column), dtype=object)
42
+ out[:] = column
43
+ return out
44
+ return column
45
+
46
+ def _tensorize(self, value):
47
+ if isinstance(value, (str, bytes, type(None))):
48
+ return value
49
+ elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
50
+ return value
51
+ elif isinstance(value, np.number):
52
+ return value
53
+
54
+ default_dtype = {}
55
+
56
+ if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
57
+ default_dtype = {"dtype": np.int64}
58
+ elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
59
+ default_dtype = {"dtype": np.float32}
60
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules:
61
+ import PIL.Image
62
+
63
+ if isinstance(value, PIL.Image.Image):
64
+ return np.asarray(value, **self.np_array_kwargs)
65
+
66
+ return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})
67
+
68
+ def _recursive_tensorize(self, data_struct):
69
+ # support for torch, tf, jax etc.
70
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
71
+ import torch
72
+
73
+ if isinstance(data_struct, torch.Tensor):
74
+ return self._tensorize(data_struct.detach().cpu().numpy()[()])
75
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
76
+ data_struct = data_struct.__array__()
77
+ # support for nested types like struct of list of struct
78
+ if isinstance(data_struct, np.ndarray):
79
+ if data_struct.dtype == object:
80
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
81
+ if isinstance(data_struct, (list, tuple)):
82
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
83
+ return self._tensorize(data_struct)
84
+
85
+ def recursive_tensorize(self, data_struct: dict):
86
+ return map_nested(self._recursive_tensorize, data_struct, map_list=False)
87
+
88
+ def format_row(self, pa_table: pa.Table) -> Mapping:
89
+ row = self.numpy_arrow_extractor().extract_row(pa_table)
90
+ row = self.python_features_decoder.decode_row(row)
91
+ return self.recursive_tensorize(row)
92
+
93
+ def format_column(self, pa_table: pa.Table) -> np.ndarray:
94
+ column = self.numpy_arrow_extractor().extract_column(pa_table)
95
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
96
+ column = self.recursive_tensorize(column)
97
+ column = self._consolidate(column)
98
+ return column
99
+
100
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
101
+ batch = self.numpy_arrow_extractor().extract_batch(pa_table)
102
+ batch = self.python_features_decoder.decode_batch(batch)
103
+ batch = self.recursive_tensorize(batch)
104
+ for column_name in batch:
105
+ batch[column_name] = self._consolidate(batch[column_name])
106
+ return batch
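A short sketch of the consolidation behaviour described above, using the public with_format API:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0.5, 1.5]}).with_format("numpy")
batch = ds[:2]
# equal-shaped rows are stacked into one ndarray; ragged rows would stay an object array
print(batch["x"].shape, batch["x"].dtype)  # (2, 2) int64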
env-llmeval/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py ADDED
@@ -0,0 +1,115 @@
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import tensorflow as tf
30
+
31
+
32
+ class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
33
+ def __init__(self, features=None, **tf_tensor_kwargs):
34
+ super().__init__(features=features)
35
+ self.tf_tensor_kwargs = tf_tensor_kwargs
36
+ import tensorflow as tf # noqa: F401 - import tf at initialization
37
+
38
+ def _consolidate(self, column):
39
+ import tensorflow as tf
40
+
41
+ if isinstance(column, list) and column:
42
+ if all(
43
+ isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
44
+ ):
45
+ return tf.stack(column)
46
+ elif all(
47
+ isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
48
+ for x in column
49
+ ):
50
+ # only ragged-stack 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
51
+ return tf.ragged.stack(column)
52
+
53
+ return column
54
+
55
+ def _tensorize(self, value):
56
+ import tensorflow as tf
57
+
58
+ if value is None:
59
+ return value
60
+
61
+ default_dtype = {}
62
+
63
+ if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
64
+ default_dtype = {"dtype": tf.int64}
65
+ elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
66
+ default_dtype = {"dtype": tf.float32}
67
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules:
68
+ import PIL.Image
69
+
70
+ if isinstance(value, PIL.Image.Image):
71
+ value = np.asarray(value)
72
+
73
+ return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})
74
+
75
+ def _recursive_tensorize(self, data_struct):
76
+ import tensorflow as tf
77
+
78
+ # support for torch, tf, jax etc.
79
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
80
+ import torch
81
+
82
+ if isinstance(data_struct, torch.Tensor):
83
+ return self._tensorize(data_struct.detach().cpu().numpy()[()])
84
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
85
+ data_struct = data_struct.__array__()
86
+ # support for nested types like struct of list of struct
87
+ if isinstance(data_struct, np.ndarray):
88
+ if data_struct.dtype == object: # tf tensors cannot be instantiated from an array of objects
89
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
90
+ elif isinstance(data_struct, (list, tuple)):
91
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
92
+ return self._tensorize(data_struct)
93
+
94
+ def recursive_tensorize(self, data_struct: dict):
95
+ return map_nested(self._recursive_tensorize, data_struct, map_list=False)
96
+
97
+ def format_row(self, pa_table: pa.Table) -> Mapping:
98
+ row = self.numpy_arrow_extractor().extract_row(pa_table)
99
+ row = self.python_features_decoder.decode_row(row)
100
+ return self.recursive_tensorize(row)
101
+
102
+ def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
103
+ column = self.numpy_arrow_extractor().extract_column(pa_table)
104
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
105
+ column = self.recursive_tensorize(column)
106
+ column = self._consolidate(column)
107
+ return column
108
+
109
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
110
+ batch = self.numpy_arrow_extractor().extract_batch(pa_table)
111
+ batch = self.python_features_decoder.decode_batch(batch)
112
+ batch = self.recursive_tensorize(batch)
113
+ for column_name in batch:
114
+ batch[column_name] = self._consolidate(batch[column_name])
115
+ return batch
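A small sketch of how the ragged fallback in _consolidate shows up through the public API (assuming TensorFlow is installed):

from datasets import Dataset

ds = Dataset.from_dict({"tokens": [[1, 2, 3], [4, 5]], "label": [0, 1]}).with_format("tf")
batch = ds[:2]
# rows of equal shape are tf.stack-ed; these 1-D rows differ in length, so tf.ragged.stack is used
print(type(batch["tokens"]))  # tf.RaggedTensor
print(batch["label"].dtype)   # int64, per the default dtype above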
env-llmeval/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py ADDED
@@ -0,0 +1,111 @@
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import torch
30
+
31
+
32
+ class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
33
+ def __init__(self, features=None, **torch_tensor_kwargs):
34
+ super().__init__(features=features)
35
+ self.torch_tensor_kwargs = torch_tensor_kwargs
36
+ import torch # noqa: F401 - import torch at initialization
37
+
38
+ def _consolidate(self, column):
39
+ import torch
40
+
41
+ if isinstance(column, list) and column:
42
+ if all(
43
+ isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
44
+ for x in column
45
+ ):
46
+ return torch.stack(column)
47
+ return column
48
+
49
+ def _tensorize(self, value):
50
+ import torch
51
+
52
+ if isinstance(value, (str, bytes, type(None))):
53
+ return value
54
+ elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
55
+ return value.tolist()
56
+
57
+ default_dtype = {}
58
+
59
+ if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
60
+ default_dtype = {"dtype": torch.int64}
61
+
62
+ # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
63
+ # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
64
+ if value.dtype in [np.uint16, np.uint32]:
65
+ value = value.astype(np.int64)
66
+
67
+ elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
68
+ default_dtype = {"dtype": torch.float32}
69
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules:
70
+ import PIL.Image
71
+
72
+ if isinstance(value, PIL.Image.Image):
73
+ value = np.asarray(value)
74
+ return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
75
+
76
+ def _recursive_tensorize(self, data_struct):
77
+ import torch
78
+
79
+ # support for torch, tf, jax etc.
80
+ if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
81
+ data_struct = data_struct.__array__()
82
+ # support for nested types like struct of list of struct
83
+ if isinstance(data_struct, np.ndarray):
84
+ if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
85
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
86
+ elif isinstance(data_struct, (list, tuple)):
87
+ return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
88
+ return self._tensorize(data_struct)
89
+
90
+ def recursive_tensorize(self, data_struct: dict):
91
+ return map_nested(self._recursive_tensorize, data_struct, map_list=False)
92
+
93
+ def format_row(self, pa_table: pa.Table) -> Mapping:
94
+ row = self.numpy_arrow_extractor().extract_row(pa_table)
95
+ row = self.python_features_decoder.decode_row(row)
96
+ return self.recursive_tensorize(row)
97
+
98
+ def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
99
+ column = self.numpy_arrow_extractor().extract_column(pa_table)
100
+ column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
101
+ column = self.recursive_tensorize(column)
102
+ column = self._consolidate(column)
103
+ return column
104
+
105
+ def format_batch(self, pa_table: pa.Table) -> Mapping:
106
+ batch = self.numpy_arrow_extractor().extract_batch(pa_table)
107
+ batch = self.python_features_decoder.decode_batch(batch)
108
+ batch = self.recursive_tensorize(batch)
109
+ for column_name in batch:
110
+ batch[column_name] = self._consolidate(batch[column_name])
111
+ return batch
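A minimal sketch of the formatter in use, assuming extra with_format keyword arguments are forwarded to torch.tensor as torch_tensor_kwargs:

import torch
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch", dtype=torch.float32)
print(ds[0]["x"].dtype)   # torch.float32 (the kwarg overrides the int64 default above)
print(ds[:2]["x"].shape)  # torch.Size([2, 2]) after _consolidate stacks the rows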
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.39 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc ADDED
Binary file (3.01 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc ADDED
Binary file (7.17 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc ADDED
Binary file (1.69 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py ADDED
@@ -0,0 +1,31 @@
1
+ from dataclasses import dataclass
2
+ from typing import Callable, Optional
3
+
4
+ import datasets
5
+
6
+
7
+ @dataclass
8
+ class GeneratorConfig(datasets.BuilderConfig):
9
+ generator: Optional[Callable] = None
10
+ gen_kwargs: Optional[dict] = None
11
+ features: Optional[datasets.Features] = None
12
+
13
+ def __post_init__(self):
14
+ assert self.generator is not None, "generator must be specified"
15
+
16
+ if self.gen_kwargs is None:
17
+ self.gen_kwargs = {}
18
+
19
+
20
+ class Generator(datasets.GeneratorBasedBuilder):
21
+ BUILDER_CONFIG_CLASS = GeneratorConfig
22
+
23
+ def _info(self):
24
+ return datasets.DatasetInfo(features=self.config.features)
25
+
26
+ def _split_generators(self, dl_manager):
27
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
28
+
29
+ def _generate_examples(self, **gen_kwargs):
30
+ for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
31
+ yield idx, ex
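The builder above backs Dataset.from_generator; a minimal usage sketch:

from datasets import Dataset

def gen(shards):
    for shard in shards:
        for i in range(3):
            yield {"shard": shard, "i": i}

ds = Dataset.from_generator(gen, gen_kwargs={"shards": ["a", "b"]})
print(len(ds))  # 6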
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc ADDED
Binary file (10.5 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py ADDED
@@ -0,0 +1,349 @@
1
+ import os
2
+ import posixpath
+ import shutil
3
+ import uuid
4
+ from dataclasses import dataclass
5
+ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
6
+
7
+ import numpy as np
8
+ import pyarrow as pa
9
+
10
+ import datasets
11
+ from datasets.arrow_writer import ArrowWriter, ParquetWriter
12
+ from datasets.config import MAX_SHARD_SIZE
13
+ from datasets.filesystems import (
14
+ is_remote_filesystem,
15
+ rename,
16
+ )
17
+ from datasets.iterable_dataset import _BaseExamplesIterable
18
+ from datasets.utils.py_utils import convert_file_size_to_int
19
+
20
+
21
+ logger = datasets.utils.logging.get_logger(__name__)
22
+
23
+ if TYPE_CHECKING:
24
+ import pyspark
25
+
26
+
27
+ @dataclass
28
+ class SparkConfig(datasets.BuilderConfig):
29
+ """BuilderConfig for Spark."""
30
+
31
+ features: Optional[datasets.Features] = None
32
+
33
+
34
+ def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
35
+ df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
36
+ for partition_id in new_partition_order[1:]:
37
+ partition_df = df.select("*").where(f"part_id = {partition_id}")
38
+ df_combined = df_combined.union(partition_df)
39
+ return df_combined
40
+
41
+
42
+ def _generate_iterable_examples(
43
+ df: "pyspark.sql.DataFrame",
44
+ partition_order: List[int],
45
+ ):
46
+ import pyspark
47
+
48
+ def generate_fn():
49
+ df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
50
+ partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order)
51
+ row_id = 0
52
+ # pipeline next partition in parallel to hide latency
53
+ rows = partition_df.toLocalIterator(prefetchPartitions=True)
54
+ curr_partition = -1
55
+ for row in rows:
56
+ row_as_dict = row.asDict()
57
+ part_id = row_as_dict["part_id"]
58
+ row_as_dict.pop("part_id")
59
+ if curr_partition != part_id:
60
+ curr_partition = part_id
61
+ row_id = 0
62
+ yield f"{part_id}_{row_id}", row_as_dict
63
+ row_id += 1
64
+
65
+ return generate_fn
66
+
67
+
68
+ class SparkExamplesIterable(_BaseExamplesIterable):
69
+ def __init__(
70
+ self,
71
+ df: "pyspark.sql.DataFrame",
72
+ partition_order=None,
73
+ ):
74
+ self.df = df
75
+ self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
76
+ self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
77
+
78
+ def __iter__(self):
79
+ yield from self.generate_examples_fn()
80
+
81
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
82
+ partition_order = list(range(self.df.rdd.getNumPartitions()))
83
+ generator.shuffle(partition_order)
84
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
85
+
86
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
87
+ partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
88
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
89
+
90
+ @property
91
+ def n_shards(self) -> int:
92
+ return len(self.partition_order)
93
+
94
+
95
+ class Spark(datasets.DatasetBuilder):
96
+ BUILDER_CONFIG_CLASS = SparkConfig
97
+
98
+ def __init__(
99
+ self,
100
+ df: "pyspark.sql.DataFrame",
101
+ cache_dir: str = None,
102
+ working_dir: str = None,
103
+ **config_kwargs,
104
+ ):
105
+ import pyspark
106
+
107
+ self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
108
+ self.df = df
109
+ self._working_dir = working_dir
110
+
111
+ super().__init__(
112
+ cache_dir=cache_dir,
113
+ config_name=str(self.df.semanticHash()),
114
+ **config_kwargs,
115
+ )
116
+
117
+ def _validate_cache_dir(self):
118
+ # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
119
+ # error due to pickling the SparkContext.
120
+ cache_dir = self._cache_dir
121
+
122
+ # Returns the path of the created file.
123
+ def create_cache_and_write_probe(context):
124
+ # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
125
+ # already exist.
126
+ os.makedirs(cache_dir, exist_ok=True)
127
+ probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
128
+ # Opening the file in append mode will create a new file unless it already exists, in which case it will not
129
+ # change the file contents.
130
+ open(probe_file, "a")
131
+ return [probe_file]
132
+
133
+ if self._spark.conf.get("spark.master", "").startswith("local"):
134
+ return
135
+
136
+ # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
137
+ # accessible to the driver.
138
+ # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
139
+ if self._cache_dir:
140
+ probe = (
141
+ self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
142
+ )
143
+ if os.path.isfile(probe[0]):
144
+ return
145
+
146
+ raise ValueError(
147
+ "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
148
+ )
149
+
150
+ def _info(self):
151
+ return datasets.DatasetInfo(features=self.config.features)
152
+
153
+ def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
154
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
155
+
156
+ def _repartition_df_if_needed(self, max_shard_size):
157
+ import pyspark
158
+
159
+ def get_arrow_batch_size(it):
160
+ for batch in it:
161
+ yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
162
+
163
+ df_num_rows = self.df.count()
164
+ sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
165
+ # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
166
+ approx_bytes_per_row = (
167
+ self.df.limit(sample_num_rows)
168
+ .repartition(1)
169
+ .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
170
+ .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
171
+ .collect()[0]
172
+ .sample_bytes
173
+ / sample_num_rows
174
+ )
175
+ approx_total_size = approx_bytes_per_row * df_num_rows
176
+ if approx_total_size > max_shard_size:
177
+ # Make sure there is at least one row per partition.
178
+ new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
179
+ self.df = self.df.repartition(new_num_partitions)
180
+
181
+ def _prepare_split_single(
182
+ self,
183
+ fpath: str,
184
+ file_format: str,
185
+ max_shard_size: int,
186
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
187
+ import pyspark
188
+
189
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
190
+ working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
191
+ embed_local_files = file_format == "parquet"
192
+
193
+ # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
194
+ # pickling the SparkContext.
195
+ features = self.config.features
196
+ writer_batch_size = self._writer_batch_size
197
+ storage_options = self._fs.storage_options
198
+
199
+ def write_arrow(it):
200
+ # Within the same SparkContext, no two task attempts will share the same attempt ID.
201
+ task_id = pyspark.TaskContext().taskAttemptId()
202
+ first_batch = next(it, None)
203
+ if first_batch is None:
204
+ # Some partitions might not receive any data.
205
+ return pa.RecordBatch.from_arrays(
206
+ [[task_id], [0], [0]],
207
+ names=["task_id", "num_examples", "num_bytes"],
208
+ )
209
+ shard_id = 0
210
+ writer = writer_class(
211
+ features=features,
212
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
213
+ writer_batch_size=writer_batch_size,
214
+ storage_options=storage_options,
215
+ embed_local_files=embed_local_files,
216
+ )
217
+ table = pa.Table.from_batches([first_batch])
218
+ writer.write_table(table)
219
+ for batch in it:
220
+ if max_shard_size is not None and writer._num_bytes >= max_shard_size:
221
+ num_examples, num_bytes = writer.finalize()
222
+ writer.close()
223
+ yield pa.RecordBatch.from_arrays(
224
+ [[task_id], [num_examples], [num_bytes]],
225
+ names=["task_id", "num_examples", "num_bytes"],
226
+ )
227
+ shard_id += 1
228
+ writer = writer_class(
229
+ features=writer._features,
230
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
231
+ writer_batch_size=writer_batch_size,
232
+ storage_options=storage_options,
233
+ embed_local_files=embed_local_files,
234
+ )
235
+ table = pa.Table.from_batches([batch])
236
+ writer.write_table(table)
237
+
238
+ if writer._num_bytes > 0:
239
+ num_examples, num_bytes = writer.finalize()
240
+ writer.close()
241
+ yield pa.RecordBatch.from_arrays(
242
+ [[task_id], [num_examples], [num_bytes]],
243
+ names=["task_id", "num_examples", "num_bytes"],
244
+ )
245
+
246
+ if working_fpath != fpath:
247
+ for file in os.listdir(os.path.dirname(working_fpath)):
248
+ dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
249
+ shutil.move(file, dest)
250
+
251
+ stats = (
252
+ self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
253
+ .groupBy("task_id")
254
+ .agg(
255
+ pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
256
+ pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
257
+ pyspark.sql.functions.count("num_bytes").alias("num_shards"),
258
+ pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
259
+ )
260
+ .collect()
261
+ )
262
+ for row in stats:
263
+ yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
264
+
265
+ def _prepare_split(
266
+ self,
267
+ split_generator: "datasets.SplitGenerator",
268
+ file_format: str = "arrow",
269
+ max_shard_size: Optional[Union[str, int]] = None,
270
+ num_proc: Optional[int] = None,
271
+ **kwargs,
272
+ ):
273
+ self._validate_cache_dir()
274
+
275
+ max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
276
+ self._repartition_df_if_needed(max_shard_size)
277
+ is_local = not is_remote_filesystem(self._fs)
278
+ path_join = os.path.join if is_local else posixpath.join
279
+
280
+ SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
281
+ fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
282
+ fpath = path_join(self._output_dir, fname)
283
+
284
+ total_num_examples = 0
285
+ total_num_bytes = 0
286
+ total_shards = 0
287
+ task_id_and_num_shards = []
288
+ all_shard_lengths = []
289
+
290
+ for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
291
+ (
292
+ num_examples,
293
+ num_bytes,
294
+ num_shards,
295
+ shard_lengths,
296
+ ) = content
297
+ if num_bytes > 0:
298
+ total_num_examples += num_examples
299
+ total_num_bytes += num_bytes
300
+ total_shards += num_shards
301
+ task_id_and_num_shards.append((task_id, num_shards))
302
+ all_shard_lengths.extend(shard_lengths)
303
+
304
+ split_generator.split_info.num_examples = total_num_examples
305
+ split_generator.split_info.num_bytes = total_num_bytes
306
+
307
+ # should rename everything at the end
308
+ logger.debug(f"Renaming {total_shards} shards.")
309
+ if total_shards > 1:
310
+ split_generator.split_info.shard_lengths = all_shard_lengths
311
+
312
+ # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
313
+ # pickling error due to pickling the SparkContext.
314
+ fs = self._fs
315
+
316
+ # use the -SSSSS-of-NNNNN pattern
317
+ def _rename_shard(
318
+ task_id: int,
319
+ shard_id: int,
320
+ global_shard_id: int,
321
+ ):
322
+ rename(
323
+ fs,
324
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
325
+ fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
326
+ )
327
+
328
+ args = []
329
+ global_shard_id = 0
330
+ for i in range(len(task_id_and_num_shards)):
331
+ task_id, num_shards = task_id_and_num_shards[i]
332
+ for shard_id in range(num_shards):
333
+ args.append([task_id, shard_id, global_shard_id])
334
+ global_shard_id += 1
335
+ self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
336
+ else:
337
+ # don't use any pattern
338
+ shard_id = 0
339
+ task_id = task_id_and_num_shards[0][0]
340
+ self._rename(
341
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
342
+ fpath.replace(SUFFIX, ""),
343
+ )
344
+
345
+ def _get_examples_iterable_for_split(
346
+ self,
347
+ split_generator: "datasets.SplitGenerator",
348
+ ) -> SparkExamplesIterable:
349
+ return SparkExamplesIterable(self.df)
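The Spark builder above is what Dataset.from_spark dispatches to; a minimal local sketch (the example DataFrame is illustrative only):

from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(i, i * i) for i in range(100)], ["x", "x_squared"])
ds = Dataset.from_spark(df)  # materializes Arrow shards via _prepare_split above
# on a multi-node cluster, pass cache_dir=... pointing at storage reachable from both
# the driver and the workers, matching the check in _validate_cache_dir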
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc ADDED
Binary file (4.47 kB).
env-llmeval/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py ADDED
@@ -0,0 +1,118 @@
1
+ import sys
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+
13
+
14
+ if TYPE_CHECKING:
15
+ import sqlite3
16
+
17
+ import sqlalchemy
18
+
19
+
20
+ logger = datasets.utils.logging.get_logger(__name__)
21
+
22
+
23
+ @dataclass
24
+ class SqlConfig(datasets.BuilderConfig):
25
+ """BuilderConfig for SQL."""
26
+
27
+ sql: Union[str, "sqlalchemy.sql.Selectable"] = None
28
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
29
+ index_col: Optional[Union[str, List[str]]] = None
30
+ coerce_float: bool = True
31
+ params: Optional[Union[List, Tuple, Dict]] = None
32
+ parse_dates: Optional[Union[List, Dict]] = None
33
+ columns: Optional[List[str]] = None
34
+ chunksize: Optional[int] = 10_000
35
+ features: Optional[datasets.Features] = None
36
+
37
+ def __post_init__(self):
38
+ if self.sql is None:
39
+ raise ValueError("sql must be specified")
40
+ if self.con is None:
41
+ raise ValueError("con must be specified")
42
+
43
+ def create_config_id(
44
+ self,
45
+ config_kwargs: dict,
46
+ custom_features: Optional[datasets.Features] = None,
47
+ ) -> str:
48
+ config_kwargs = config_kwargs.copy()
49
+ # We need to stringify the Selectable object to make its hash deterministic
50
+
51
+ # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
52
+ sql = config_kwargs["sql"]
53
+ if not isinstance(sql, str):
54
+ if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
55
+ import sqlalchemy
56
+
57
+ if isinstance(sql, sqlalchemy.sql.Selectable):
58
+ engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
59
+ sql_str = str(sql.compile(dialect=engine.dialect))
60
+ config_kwargs["sql"] = sql_str
61
+ else:
62
+ raise TypeError(
63
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
64
+ )
65
+ else:
66
+ raise TypeError(
67
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
68
+ )
69
+ con = config_kwargs["con"]
70
+ if not isinstance(con, str):
71
+ config_kwargs["con"] = id(con)
72
+ logger.info(
73
+ f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
74
+ )
75
+
76
+ return super().create_config_id(config_kwargs, custom_features=custom_features)
77
+
78
+ @property
79
+ def pd_read_sql_kwargs(self):
80
+ pd_read_sql_kwargs = {
81
+ "index_col": self.index_col,
82
+ "columns": self.columns,
83
+ "params": self.params,
84
+ "coerce_float": self.coerce_float,
85
+ "parse_dates": self.parse_dates,
86
+ }
87
+ return pd_read_sql_kwargs
88
+
89
+
90
+ class Sql(datasets.ArrowBasedBuilder):
91
+ BUILDER_CONFIG_CLASS = SqlConfig
92
+
93
+ def _info(self):
94
+ return datasets.DatasetInfo(features=self.config.features)
95
+
96
+ def _split_generators(self, dl_manager):
97
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
98
+
99
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
100
+ if self.config.features is not None:
101
+ schema = self.config.features.arrow_schema
102
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
103
+ # cheaper cast
104
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
105
+ else:
106
+ # more expensive cast; allows str <-> int/float or str to Audio for example
107
+ pa_table = table_cast(pa_table, schema)
108
+ return pa_table
109
+
110
+ def _generate_tables(self):
111
+ chunksize = self.config.chunksize
112
+ sql_reader = pd.read_sql(
113
+ self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
114
+ )
115
+ sql_reader = [sql_reader] if chunksize is None else sql_reader
116
+ for chunk_idx, df in enumerate(sql_reader):
117
+ pa_table = pa.Table.from_pandas(df)
118
+ yield chunk_idx, self._cast_table(pa_table)
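The Sql builder above is exposed through Dataset.from_sql; a minimal sketch, assuming SQLAlchemy is installed so the connection can be given as a URI string (which keeps the config hash deterministic, per create_config_id above):

import sqlite3
import pandas as pd
from datasets import Dataset

with sqlite3.connect("example.db") as con:
    pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_sql("train", con, index=False)

ds = Dataset.from_sql("SELECT text, label FROM train", "sqlite:///example.db")
print(ds[0])  # {'text': 'a', 'label': 0}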