applied-ai-018 commited on
Commit
84dc634
·
verified ·
1 Parent(s): 42d2ace

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/datasets/__init__.py +70 -0
  2. llmeval-env/lib/python3.10/site-packages/datasets/commands/__init__.py +13 -0
  3. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/datasets/commands/convert.py +195 -0
  12. llmeval-env/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py +156 -0
  13. llmeval-env/lib/python3.10/site-packages/datasets/commands/datasets_cli.py +45 -0
  14. llmeval-env/lib/python3.10/site-packages/datasets/commands/dummy_data.py +468 -0
  15. llmeval-env/lib/python3.10/site-packages/datasets/commands/env.py +41 -0
  16. llmeval-env/lib/python3.10/site-packages/datasets/commands/run_beam.py +168 -0
  17. llmeval-env/lib/python3.10/site-packages/datasets/commands/test.py +201 -0
  18. llmeval-env/lib/python3.10/site-packages/datasets/data_files.py +821 -0
  19. llmeval-env/lib/python3.10/site-packages/datasets/download/__init__.py +10 -0
  20. llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/datasets/download/download_config.py +108 -0
  26. llmeval-env/lib/python3.10/site-packages/datasets/download/download_manager.py +448 -0
  27. llmeval-env/lib/python3.10/site-packages/datasets/download/mock_download_manager.py +244 -0
  28. llmeval-env/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py +210 -0
  29. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__init__.py +69 -0
  30. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/compression.py +123 -0
  34. llmeval-env/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py +116 -0
  35. llmeval-env/lib/python3.10/site-packages/datasets/io/__init__.py +0 -0
  36. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/datasets/io/abc.py +53 -0
  46. llmeval-env/lib/python3.10/site-packages/datasets/io/csv.py +145 -0
  47. llmeval-env/lib/python3.10/site-packages/datasets/io/generator.py +57 -0
  48. llmeval-env/lib/python3.10/site-packages/datasets/io/json.py +170 -0
  49. llmeval-env/lib/python3.10/site-packages/datasets/io/parquet.py +158 -0
  50. llmeval-env/lib/python3.10/site-packages/datasets/io/spark.py +57 -0
llmeval-env/lib/python3.10/site-packages/datasets/__init__.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ruff: noqa
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ __version__ = "2.19.1"
17
+
18
+ from .arrow_dataset import Dataset
19
+ from .arrow_reader import ReadInstruction
20
+ from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
21
+ from .combine import concatenate_datasets, interleave_datasets
22
+ from .dataset_dict import DatasetDict, IterableDatasetDict
23
+ from .download import *
24
+ from .features import *
25
+ from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
26
+ from .info import DatasetInfo, MetricInfo
27
+ from .inspect import (
28
+ get_dataset_config_info,
29
+ get_dataset_config_names,
30
+ get_dataset_default_config_name,
31
+ get_dataset_infos,
32
+ get_dataset_split_names,
33
+ inspect_dataset,
34
+ inspect_metric,
35
+ list_datasets,
36
+ list_metrics,
37
+ )
38
+ from .iterable_dataset import IterableDataset
39
+ from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
40
+ from .metric import Metric
41
+ from .splits import (
42
+ NamedSplit,
43
+ NamedSplitAll,
44
+ Split,
45
+ SplitBase,
46
+ SplitDict,
47
+ SplitGenerator,
48
+ SplitInfo,
49
+ SubSplitInfo,
50
+ percent,
51
+ )
52
+ from .tasks import *
53
+ from .utils import *
54
+ from .utils import logging
55
+
56
+
57
+ # deprecated modules
58
+ from datasets import arrow_dataset as _arrow_dataset # isort:skip
59
+ from datasets import utils as _utils # isort:skip
60
+ from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
61
+
62
+ _arrow_dataset.concatenate_datasets = concatenate_datasets
63
+ _utils.DownloadConfig = DownloadConfig
64
+ _utils.DownloadManager = DownloadManager
65
+ _utils.DownloadMode = DownloadMode
66
+ _deprecated_download_manager.DownloadConfig = DownloadConfig
67
+ _deprecated_download_manager.DownloadMode = DownloadMode
68
+ _deprecated_download_manager.DownloadManager = DownloadManager
69
+
70
+ del _arrow_dataset, _utils, _deprecated_download_manager
llmeval-env/lib/python3.10/site-packages/datasets/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from argparse import ArgumentParser
3
+
4
+
5
+ class BaseDatasetsCLICommand(ABC):
6
+ @staticmethod
7
+ @abstractmethod
8
+ def register_subcommand(parser: ArgumentParser):
9
+ raise NotImplementedError()
10
+
11
+ @abstractmethod
12
+ def run(self):
13
+ raise NotImplementedError()
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (817 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc ADDED
Binary file (6.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc ADDED
Binary file (4.42 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (1.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/run_beam.cpython-310.pyc ADDED
Binary file (5.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc ADDED
Binary file (5.63 kB). View file
 
llmeval-env/lib/python3.10/site-packages/datasets/commands/convert.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import shutil
4
+ from argparse import ArgumentParser, Namespace
5
+
6
+ from datasets.commands import BaseDatasetsCLICommand
7
+ from datasets.utils.logging import get_logger
8
+
9
+
10
+ HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
11
+
12
+ HIGHLIGHT_MESSAGE_POST = """=======
13
+ >>>>>>>
14
+ """
15
+
16
+ TO_HIGHLIGHT = [
17
+ "TextEncoderConfig",
18
+ "ByteTextEncoder",
19
+ "SubwordTextEncoder",
20
+ "encoder_config",
21
+ "maybe_build_from_corpus",
22
+ "manual_dir",
23
+ ]
24
+
25
+ TO_CONVERT = [
26
+ # (pattern, replacement)
27
+ # Order is important here for some replacements
28
+ (r"tfds\.core", r"datasets"),
29
+ (r"tf\.io\.gfile\.GFile", r"open"),
30
+ (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
31
+ (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
32
+ (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
33
+ (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
34
+ (r"tfds\.features\.FeaturesDict\(", r"dict("),
35
+ (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
36
+ (r"tfds\.", r"datasets."),
37
+ (r"dl_manager\.manual_dir", r"self.config.data_dir"),
38
+ (r"self\.builder_config", r"self.config"),
39
+ ]
40
+
41
+
42
+ def convert_command_factory(args: Namespace):
43
+ """
44
+ Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
45
+
46
+ Returns: ConvertCommand
47
+ """
48
+ return ConvertCommand(args.tfds_path, args.datasets_directory)
49
+
50
+
51
+ class ConvertCommand(BaseDatasetsCLICommand):
52
+ @staticmethod
53
+ def register_subcommand(parser: ArgumentParser):
54
+ """
55
+ Register this command to argparse so it's available for the datasets-cli
56
+
57
+ Args:
58
+ parser: Root parser to register command-specific arguments
59
+ """
60
+ train_parser = parser.add_parser(
61
+ "convert",
62
+ help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
63
+ )
64
+ train_parser.add_argument(
65
+ "--tfds_path",
66
+ type=str,
67
+ required=True,
68
+ help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
69
+ )
70
+ train_parser.add_argument(
71
+ "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
72
+ )
73
+ train_parser.set_defaults(func=convert_command_factory)
74
+
75
+ def __init__(self, tfds_path: str, datasets_directory: str, *args):
76
+ self._logger = get_logger("datasets-cli/converting")
77
+
78
+ self._tfds_path = tfds_path
79
+ self._datasets_directory = datasets_directory
80
+
81
+ def run(self):
82
+ if os.path.isdir(self._tfds_path):
83
+ abs_tfds_path = os.path.abspath(self._tfds_path)
84
+ elif os.path.isfile(self._tfds_path):
85
+ abs_tfds_path = os.path.dirname(self._tfds_path)
86
+ else:
87
+ raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
88
+
89
+ abs_datasets_path = os.path.abspath(self._datasets_directory)
90
+
91
+ self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
92
+
93
+ utils_files = []
94
+ with_manual_update = []
95
+ imports_to_builder_map = {}
96
+
97
+ if os.path.isdir(self._tfds_path):
98
+ file_names = os.listdir(abs_tfds_path)
99
+ else:
100
+ file_names = [os.path.basename(self._tfds_path)]
101
+
102
+ for f_name in file_names:
103
+ self._logger.info(f"Looking at file {f_name}")
104
+ input_file = os.path.join(abs_tfds_path, f_name)
105
+ output_file = os.path.join(abs_datasets_path, f_name)
106
+
107
+ if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
108
+ self._logger.info("Skipping file")
109
+ continue
110
+
111
+ with open(input_file, encoding="utf-8") as f:
112
+ lines = f.readlines()
113
+
114
+ out_lines = []
115
+ is_builder = False
116
+ needs_manual_update = False
117
+ tfds_imports = []
118
+ for line in lines:
119
+ out_line = line
120
+
121
+ # Convert imports
122
+ if "import tensorflow.compat.v2 as tf" in out_line:
123
+ continue
124
+ elif "@tfds.core" in out_line:
125
+ continue
126
+ elif "builder=self" in out_line:
127
+ continue
128
+ elif "import tensorflow_datasets.public_api as tfds" in out_line:
129
+ out_line = "import datasets\n"
130
+ elif "import tensorflow" in out_line:
131
+ # order is important here
132
+ out_line = ""
133
+ continue
134
+ elif "from absl import logging" in out_line:
135
+ out_line = "from datasets import logging\n"
136
+ elif "getLogger" in out_line:
137
+ out_line = out_line.replace("getLogger", "get_logger")
138
+ elif any(expression in out_line for expression in TO_HIGHLIGHT):
139
+ needs_manual_update = True
140
+ to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
141
+ out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
142
+ out_lines.append(out_line)
143
+ out_lines.append(HIGHLIGHT_MESSAGE_POST)
144
+ continue
145
+ else:
146
+ for pattern, replacement in TO_CONVERT:
147
+ out_line = re.sub(pattern, replacement, out_line)
148
+
149
+ # Take care of saving utilities (to later move them together with main script)
150
+ if "tensorflow_datasets" in out_line:
151
+ match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
152
+ tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
153
+ out_line = "from . import " + match.group(1)
154
+
155
+ # Check we have not forget anything
156
+ if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
157
+ raise ValueError(f"Error converting {out_line.strip()}")
158
+
159
+ if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
160
+ is_builder = True
161
+ out_lines.append(out_line)
162
+
163
+ if is_builder or "wmt" in f_name:
164
+ # We create a new directory for each dataset
165
+ dir_name = f_name.replace(".py", "")
166
+ output_dir = os.path.join(abs_datasets_path, dir_name)
167
+ output_file = os.path.join(output_dir, f_name)
168
+ os.makedirs(output_dir, exist_ok=True)
169
+ self._logger.info(f"Adding directory {output_dir}")
170
+ imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
171
+ else:
172
+ # Utilities will be moved at the end
173
+ utils_files.append(output_file)
174
+
175
+ if needs_manual_update:
176
+ with_manual_update.append(output_file)
177
+
178
+ with open(output_file, "w", encoding="utf-8") as f:
179
+ f.writelines(out_lines)
180
+ self._logger.info(f"Converted in {output_file}")
181
+
182
+ for utils_file in utils_files:
183
+ try:
184
+ f_name = os.path.basename(utils_file)
185
+ dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
186
+ self._logger.info(f"Moving {dest_folder} to {utils_file}")
187
+ shutil.copy(utils_file, dest_folder)
188
+ except KeyError:
189
+ self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
190
+
191
+ if with_manual_update:
192
+ for file_path in with_manual_update:
193
+ self._logger.warning(
194
+ f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
195
+ )
llmeval-env/lib/python3.10/site-packages/datasets/commands/convert_to_parquet.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ from argparse import ArgumentParser
3
+ from typing import Optional
4
+
5
+ from huggingface_hub import HfApi, create_branch, get_repo_discussions
6
+
7
+ from datasets import get_dataset_config_names, get_dataset_default_config_name, load_dataset
8
+ from datasets.commands import BaseDatasetsCLICommand
9
+
10
+
11
+ def _command_factory(args):
12
+ return ConvertToParquetCommand(
13
+ args.dataset_id,
14
+ args.token,
15
+ args.revision,
16
+ args.trust_remote_code,
17
+ )
18
+
19
+
20
+ class ConvertToParquetCommand(BaseDatasetsCLICommand):
21
+ @staticmethod
22
+ def register_subcommand(parser):
23
+ parser: ArgumentParser = parser.add_parser("convert_to_parquet", help="Convert dataset to Parquet")
24
+ parser.add_argument("dataset_id", help="source dataset ID")
25
+ parser.add_argument("--token", help="access token to the Hugging Face Hub")
26
+ parser.add_argument("--revision", help="source revision")
27
+ parser.add_argument(
28
+ "--trust_remote_code", action="store_true", help="whether to trust the code execution of the load script"
29
+ )
30
+ parser.set_defaults(func=_command_factory)
31
+
32
+ def __init__(
33
+ self,
34
+ dataset_id: str,
35
+ token: Optional[str],
36
+ revision: Optional[str],
37
+ trust_remote_code: bool,
38
+ ):
39
+ self._dataset_id = dataset_id
40
+ self._token = token
41
+ self._revision = revision
42
+ self._trust_remote_code = trust_remote_code
43
+
44
+ def run(self) -> None:
45
+ dataset_id = self._dataset_id
46
+ token = self._token
47
+ revision = self._revision
48
+ trust_remote_code = self._trust_remote_code
49
+ print(f"{dataset_id}")
50
+ configs = get_dataset_config_names(
51
+ dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code
52
+ )
53
+ print(f"{configs = }")
54
+ default_config = get_dataset_default_config_name(
55
+ dataset_id, token=token, revision=revision, trust_remote_code=trust_remote_code
56
+ )
57
+ print(f"{default_config = }")
58
+ if default_config:
59
+ config = default_config
60
+ configs.remove(default_config)
61
+ else:
62
+ config = configs.pop(0)
63
+ print(f"{config = }")
64
+ dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code)
65
+ commit_info = dataset.push_to_hub(
66
+ dataset_id,
67
+ config_name=config,
68
+ commit_message="Convert dataset to Parquet",
69
+ commit_description="Convert dataset to Parquet.",
70
+ create_pr=True,
71
+ token=token,
72
+ set_default=default_config is not None,
73
+ )
74
+ time.sleep(5)
75
+ if commit_info:
76
+ pr_revision, pr_url = commit_info.pr_revision, commit_info.pr_url
77
+ else:
78
+ pr_revision, pr_url = infer_pr(dataset_id, token=token)
79
+ for config in configs:
80
+ print(f"{config = }")
81
+ dataset = load_dataset(dataset_id, config, revision=revision, trust_remote_code=trust_remote_code)
82
+ dataset.push_to_hub(
83
+ dataset_id,
84
+ config_name=config,
85
+ commit_message=f"Add {config} data files",
86
+ revision=pr_revision,
87
+ token=token,
88
+ )
89
+ time.sleep(5)
90
+ delete_files(dataset_id, revision=pr_revision, token=token)
91
+ if not revision:
92
+ create_branch(dataset_id, branch="script", repo_type="dataset", token=token, exist_ok=True)
93
+ print(f"You can find your PR to convert the dataset to Parquet at: {pr_url}")
94
+
95
+
96
+ def infer_pr(dataset_id, token=None):
97
+ discussions = get_repo_discussions(dataset_id, repo_type="dataset", token=token)
98
+ prs = [discussion for discussion in discussions if discussion.is_pull_request and discussion.status == "open"]
99
+ pr = sorted(prs, key=lambda pr: pr.num)[-1]
100
+ return pr.git_reference, pr.url
101
+
102
+
103
+ def delete_files(dataset_id, revision=None, token=None):
104
+ dataset_name = dataset_id.split("/")[-1]
105
+ hf_api = HfApi(token=token)
106
+ repo_files = hf_api.list_repo_files(
107
+ dataset_id,
108
+ repo_type="dataset",
109
+ )
110
+ if repo_files:
111
+ legacy_json_file = []
112
+ python_files = []
113
+ data_files = []
114
+ for filename in repo_files:
115
+ if filename in {".gitattributes", "README.md"}:
116
+ continue
117
+ elif filename == f"{dataset_name}.py":
118
+ hf_api.delete_file(
119
+ filename,
120
+ dataset_id,
121
+ repo_type="dataset",
122
+ revision=revision,
123
+ commit_message="Delete loading script",
124
+ )
125
+ elif filename == "dataset_infos.json":
126
+ legacy_json_file.append(filename)
127
+ elif filename.endswith(".py"):
128
+ python_files.append(filename)
129
+ else:
130
+ data_files.append(filename)
131
+ if legacy_json_file:
132
+ hf_api.delete_file(
133
+ "dataset_infos.json",
134
+ dataset_id,
135
+ repo_type="dataset",
136
+ revision=revision,
137
+ commit_message="Delete legacy dataset_infos.json",
138
+ )
139
+ if python_files:
140
+ for filename in python_files:
141
+ hf_api.delete_file(
142
+ filename,
143
+ dataset_id,
144
+ repo_type="dataset",
145
+ revision=revision,
146
+ commit_message="Delete loading script auxiliary file",
147
+ )
148
+ if data_files:
149
+ for filename in data_files:
150
+ hf_api.delete_file(
151
+ filename,
152
+ dataset_id,
153
+ repo_type="dataset",
154
+ revision=revision,
155
+ commit_message="Delete data file",
156
+ )
llmeval-env/lib/python3.10/site-packages/datasets/commands/datasets_cli.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ from argparse import ArgumentParser
3
+
4
+ from datasets.commands.convert import ConvertCommand
5
+ from datasets.commands.convert_to_parquet import ConvertToParquetCommand
6
+ from datasets.commands.dummy_data import DummyDataCommand
7
+ from datasets.commands.env import EnvironmentCommand
8
+ from datasets.commands.run_beam import RunBeamCommand
9
+ from datasets.commands.test import TestCommand
10
+ from datasets.utils.logging import set_verbosity_info
11
+
12
+
13
+ def parse_unknown_args(unknown_args):
14
+ return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
15
+
16
+
17
+ def main():
18
+ parser = ArgumentParser(
19
+ "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
20
+ )
21
+ commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
22
+ set_verbosity_info()
23
+
24
+ # Register commands
25
+ ConvertCommand.register_subcommand(commands_parser)
26
+ EnvironmentCommand.register_subcommand(commands_parser)
27
+ TestCommand.register_subcommand(commands_parser)
28
+ RunBeamCommand.register_subcommand(commands_parser)
29
+ DummyDataCommand.register_subcommand(commands_parser)
30
+ ConvertToParquetCommand.register_subcommand(commands_parser)
31
+
32
+ # Parse args
33
+ args, unknown_args = parser.parse_known_args()
34
+ if not hasattr(args, "func"):
35
+ parser.print_help()
36
+ exit(1)
37
+ kwargs = parse_unknown_args(unknown_args)
38
+
39
+ # Run
40
+ service = args.func(args, **kwargs)
41
+ service.run()
42
+
43
+
44
+ if __name__ == "__main__":
45
+ main()
llmeval-env/lib/python3.10/site-packages/datasets/commands/dummy_data.py ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fnmatch
2
+ import json
3
+ import os
4
+ import shutil
5
+ import tempfile
6
+ import xml.etree.ElementTree as ET
7
+ from argparse import ArgumentParser
8
+ from pathlib import Path
9
+ from typing import Optional
10
+
11
+ from datasets import config
12
+ from datasets.commands import BaseDatasetsCLICommand
13
+ from datasets.download.download_config import DownloadConfig
14
+ from datasets.download.download_manager import DownloadManager
15
+ from datasets.download.mock_download_manager import MockDownloadManager
16
+ from datasets.load import dataset_module_factory, import_main_class
17
+ from datasets.utils.deprecation_utils import deprecated
18
+ from datasets.utils.logging import get_logger, set_verbosity_warning
19
+ from datasets.utils.py_utils import map_nested
20
+
21
+
22
+ logger = get_logger(__name__)
23
+
24
+ DEFAULT_ENCODING = "utf-8"
25
+
26
+
27
+ def dummy_data_command_factory(args):
28
+ return DummyDataCommand(
29
+ args.path_to_dataset,
30
+ args.auto_generate,
31
+ args.n_lines,
32
+ args.json_field,
33
+ args.xml_tag,
34
+ args.match_text_files,
35
+ args.keep_uncompressed,
36
+ args.cache_dir,
37
+ args.encoding,
38
+ )
39
+
40
+
41
+ class DummyDataGeneratorDownloadManager(DownloadManager):
42
+ def __init__(self, mock_download_manager, *args, **kwargs):
43
+ super().__init__(*args, **kwargs)
44
+ self.mock_download_manager = mock_download_manager
45
+ self.downloaded_dummy_paths = []
46
+ self.expected_dummy_paths = []
47
+
48
+ def download(self, url_or_urls):
49
+ output = super().download(url_or_urls)
50
+ dummy_output = self.mock_download_manager.download(url_or_urls)
51
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
52
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
53
+ return output
54
+
55
+ def download_and_extract(self, url_or_urls):
56
+ output = super().extract(super().download(url_or_urls))
57
+ dummy_output = self.mock_download_manager.download(url_or_urls)
58
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
59
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
60
+ return output
61
+
62
+ def auto_generate_dummy_data_folder(
63
+ self,
64
+ n_lines: int = 5,
65
+ json_field: Optional[str] = None,
66
+ xml_tag: Optional[str] = None,
67
+ match_text_files: Optional[str] = None,
68
+ encoding: Optional[str] = None,
69
+ ) -> bool:
70
+ os.makedirs(
71
+ os.path.join(
72
+ self.mock_download_manager.datasets_scripts_dir,
73
+ self.mock_download_manager.dataset_name,
74
+ self.mock_download_manager.dummy_data_folder,
75
+ "dummy_data",
76
+ ),
77
+ exist_ok=True,
78
+ )
79
+ total = 0
80
+ self.mock_download_manager.load_existing_dummy_data = False
81
+ for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
82
+ dst_path = os.path.join(
83
+ self.mock_download_manager.datasets_scripts_dir,
84
+ self.mock_download_manager.dataset_name,
85
+ self.mock_download_manager.dummy_data_folder,
86
+ relative_dst_path,
87
+ )
88
+ total += self._create_dummy_data(
89
+ src_path,
90
+ dst_path,
91
+ n_lines=n_lines,
92
+ json_field=json_field,
93
+ xml_tag=xml_tag,
94
+ match_text_files=match_text_files,
95
+ encoding=encoding,
96
+ )
97
+ if total == 0:
98
+ logger.error(
99
+ "Dummy data generation failed: no dummy files were created. "
100
+ "Make sure the data files format is supported by the auto-generation."
101
+ )
102
+ return total > 0
103
+
104
+ def _create_dummy_data(
105
+ self,
106
+ src_path: str,
107
+ dst_path: str,
108
+ n_lines: int,
109
+ json_field: Optional[str] = None,
110
+ xml_tag: Optional[str] = None,
111
+ match_text_files: Optional[str] = None,
112
+ encoding: Optional[str] = None,
113
+ ) -> int:
114
+ encoding = encoding or DEFAULT_ENCODING
115
+ if os.path.isfile(src_path):
116
+ logger.debug(f"Trying to generate dummy data file {dst_path}")
117
+ dst_path_extensions = Path(dst_path).suffixes
118
+ line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
119
+ is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
120
+ if match_text_files is not None:
121
+ file_name = os.path.basename(dst_path)
122
+ for pattern in match_text_files.split(","):
123
+ is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
124
+ # Line by line text file (txt, csv etc.)
125
+ if is_line_by_line_text_file:
126
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
127
+ with open(src_path, encoding=encoding) as src_file:
128
+ with open(dst_path, "w", encoding=encoding) as dst_file:
129
+ first_lines = []
130
+ for i, line in enumerate(src_file):
131
+ if i >= n_lines:
132
+ break
133
+ first_lines.append(line)
134
+ dst_file.write("".join(first_lines).strip())
135
+ return 1
136
+ # json file
137
+ elif ".json" in dst_path_extensions:
138
+ with open(src_path, encoding=encoding) as src_file:
139
+ json_data = json.load(src_file)
140
+ if json_field is not None:
141
+ json_data = json_data[json_field]
142
+ if isinstance(json_data, dict):
143
+ if not all(isinstance(v, list) for v in json_data.values()):
144
+ raise ValueError(
145
+ f"Couldn't parse columns {list(json_data.keys())}. "
146
+ "Maybe specify which json field must be used "
147
+ "to read the data with --json_field <my_field>."
148
+ )
149
+ first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
150
+ else:
151
+ first_json_data = json_data[:n_lines]
152
+ if json_field is not None:
153
+ first_json_data = {json_field: first_json_data}
154
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
155
+ with open(dst_path, "w", encoding=encoding) as dst_file:
156
+ json.dump(first_json_data, dst_file)
157
+ return 1
158
+ # xml file
159
+ elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
160
+ if xml_tag is None:
161
+ logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
162
+ else:
163
+ self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
164
+ return 1
165
+ logger.warning(
166
+ f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
167
+ )
168
+ return 0
169
+ # directory, iterate through all files
170
+ elif os.path.isdir(src_path):
171
+ total = 0
172
+ for path, _, files in os.walk(src_path):
173
+ for name in files:
174
+ if not name.startswith("."): # ignore files like .DS_Store etc.
175
+ src_file_path = os.path.join(path, name)
176
+ dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
177
+ total += self._create_dummy_data(
178
+ src_file_path,
179
+ dst_file_path,
180
+ n_lines=n_lines,
181
+ json_field=json_field,
182
+ xml_tag=xml_tag,
183
+ match_text_files=match_text_files,
184
+ encoding=encoding,
185
+ )
186
+ return total
187
+
188
+ @staticmethod
189
+ def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
190
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
191
+ with open(src_path, encoding=encoding) as src_file:
192
+ n_line = 0
193
+ parents = []
194
+ for event, elem in ET.iterparse(src_file, events=("start", "end")):
195
+ if event == "start":
196
+ parents.append(elem)
197
+ else:
198
+ _ = parents.pop()
199
+ if elem.tag == xml_tag:
200
+ if n_line < n_lines:
201
+ n_line += 1
202
+ else:
203
+ if parents:
204
+ parents[-1].remove(elem)
205
+ ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
206
+
207
+ def compress_autogenerated_dummy_data(self, path_to_dataset):
208
+ root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
209
+ base_name = os.path.join(root_dir, "dummy_data")
210
+ base_dir = "dummy_data"
211
+ logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
212
+ shutil.make_archive(base_name, "zip", root_dir, base_dir)
213
+ shutil.rmtree(base_name)
214
+
215
+
216
+ @deprecated(
217
+ "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
218
+ )
219
+ class DummyDataCommand(BaseDatasetsCLICommand):
220
+ @staticmethod
221
+ def register_subcommand(parser: ArgumentParser):
222
+ test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
223
+ test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
224
+ test_parser.add_argument(
225
+ "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
226
+ )
227
+ test_parser.add_argument(
228
+ "--json_field",
229
+ type=str,
230
+ default=None,
231
+ help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
232
+ )
233
+ test_parser.add_argument(
234
+ "--xml_tag",
235
+ type=str,
236
+ default=None,
237
+ help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
238
+ )
239
+ test_parser.add_argument(
240
+ "--match_text_files",
241
+ type=str,
242
+ default=None,
243
+ help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
244
+ )
245
+ test_parser.add_argument(
246
+ "--keep_uncompressed",
247
+ action="store_true",
248
+ help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.",
249
+ )
250
+ test_parser.add_argument(
251
+ "--cache_dir",
252
+ type=str,
253
+ default=None,
254
+ help="Cache directory to download and cache files when auto-generating dummy data",
255
+ )
256
+ test_parser.add_argument(
257
+ "--encoding",
258
+ type=str,
259
+ default=None,
260
+ help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
261
+ )
262
+ test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
263
+ test_parser.set_defaults(func=dummy_data_command_factory)
264
+
265
+ def __init__(
266
+ self,
267
+ path_to_dataset: str,
268
+ auto_generate: bool,
269
+ n_lines: int,
270
+ json_field: Optional[str],
271
+ xml_tag: Optional[str],
272
+ match_text_files: Optional[str],
273
+ keep_uncompressed: bool,
274
+ cache_dir: Optional[str],
275
+ encoding: Optional[str],
276
+ ):
277
+ self._path_to_dataset = path_to_dataset
278
+ if os.path.isdir(path_to_dataset):
279
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
280
+ else:
281
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
282
+ cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
283
+ self._auto_generate = auto_generate
284
+ self._n_lines = n_lines
285
+ self._json_field = json_field
286
+ self._xml_tag = xml_tag
287
+ self._match_text_files = match_text_files
288
+ self._keep_uncompressed = keep_uncompressed
289
+ self._cache_dir = cache_dir
290
+ self._encoding = encoding
291
+
292
+ def run(self):
293
+ set_verbosity_warning()
294
+ dataset_module = dataset_module_factory(self._path_to_dataset)
295
+ builder_cls = import_main_class(dataset_module.module_path)
296
+
297
+ # use `None` as config if no configs
298
+ builder_configs = builder_cls.BUILDER_CONFIGS or [None]
299
+ auto_generate_results = []
300
+ with tempfile.TemporaryDirectory() as tmp_dir:
301
+ for builder_config in builder_configs:
302
+ config_name = builder_config.name if builder_config else None
303
+ dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
304
+ version = builder_config.version if builder_config else dataset_builder.config.version
305
+ mock_dl_manager = MockDownloadManager(
306
+ dataset_name=self._dataset_name,
307
+ config=builder_config,
308
+ version=version,
309
+ use_local_dummy_data=True,
310
+ load_existing_dummy_data=False,
311
+ )
312
+
313
+ if self._auto_generate:
314
+ auto_generate_results.append(
315
+ self._autogenerate_dummy_data(
316
+ dataset_builder=dataset_builder,
317
+ mock_dl_manager=mock_dl_manager,
318
+ keep_uncompressed=self._keep_uncompressed,
319
+ )
320
+ )
321
+ else:
322
+ self._print_dummy_data_instructions(
323
+ dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
324
+ )
325
+ if self._auto_generate and not self._keep_uncompressed:
326
+ if all(auto_generate_results):
327
+ print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
328
+ else:
329
+ print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
330
+
331
+ def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
332
+ dl_cache_dir = (
333
+ os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
334
+ if self._cache_dir
335
+ else config.DOWNLOADED_DATASETS_PATH
336
+ )
337
+ download_config = DownloadConfig(cache_dir=dl_cache_dir)
338
+ dl_manager = DummyDataGeneratorDownloadManager(
339
+ dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
340
+ )
341
+ dataset_builder._split_generators(dl_manager)
342
+ mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
343
+ dl_manager.auto_generate_dummy_data_folder(
344
+ n_lines=self._n_lines,
345
+ json_field=self._json_field,
346
+ xml_tag=self._xml_tag,
347
+ match_text_files=self._match_text_files,
348
+ encoding=self._encoding,
349
+ )
350
+ if not keep_uncompressed:
351
+ path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
352
+ dl_manager.compress_autogenerated_dummy_data(path_do_dataset)
353
+ # now test that the dummy_data.zip file actually works
354
+ mock_dl_manager.load_existing_dummy_data = True # use real dummy data
355
+ n_examples_per_split = {}
356
+ os.makedirs(dataset_builder._cache_dir, exist_ok=True)
357
+ try:
358
+ split_generators = dataset_builder._split_generators(mock_dl_manager)
359
+ for split_generator in split_generators:
360
+ dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
361
+ n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
362
+ except OSError as e:
363
+ logger.error(
364
+ f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n"
365
+ + str(e)
366
+ )
367
+ return False
368
+ else:
369
+ if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
370
+ logger.warning(
371
+ f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''."
372
+ )
373
+ return True
374
+ else:
375
+ empty_splits = [
376
+ split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
377
+ ]
378
+ logger.warning(
379
+ f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''."
380
+ )
381
+ return False
382
+ else:
383
+ generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
384
+ logger.info(
385
+ f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
386
+ "Please compress this directory into a zip file to use it for dummy data tests."
387
+ )
388
+
389
+ def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
390
+ dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
391
+ logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
392
+ os.makedirs(dummy_data_folder, exist_ok=True)
393
+
394
+ try:
395
+ generator_splits = dataset_builder._split_generators(mock_dl_manager)
396
+ except FileNotFoundError as e:
397
+ print(
398
+ f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
399
+ )
400
+
401
+ files_to_create = set()
402
+ split_names = []
403
+ dummy_file_name = mock_dl_manager.dummy_file_name
404
+
405
+ for split in generator_splits:
406
+ logger.info(f"Collecting dummy data file paths to create for {split.name}")
407
+ split_names.append(split.name)
408
+ gen_kwargs = split.gen_kwargs
409
+ generator = dataset_builder._generate_examples(**gen_kwargs)
410
+
411
+ try:
412
+ dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
413
+ config_string = (
414
+ f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
415
+ )
416
+ dummy_data_guidance_print += (
417
+ "- In order to create the dummy data for "
418
+ + config_string
419
+ + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
420
+ )
421
+
422
+ # trigger generate function
423
+ for key, record in generator:
424
+ pass
425
+
426
+ dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"
427
+
428
+ except FileNotFoundError as e:
429
+ files_to_create.add(e.filename)
430
+
431
+ split_names = ", ".join(split_names)
432
+ if len(files_to_create) > 0:
433
+ # no glob.glob(...) in `_generate_examples(...)`
434
+ if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
435
+ dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
436
+ files_string = dummy_file_name
437
+ else:
438
+ files_string = ", ".join(files_to_create)
439
+ dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"
440
+
441
+ dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"
442
+
443
+ dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"
444
+
445
+ if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
446
+ dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"
447
+
448
+ dummy_data_guidance_print += (
449
+ f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
450
+ )
451
+
452
+ dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
453
+ else:
454
+ dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"
455
+
456
+ dummy_data_guidance_print += (
457
+ f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
458
+ )
459
+
460
+ dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
461
+
462
+ dummy_data_guidance_print += (
463
+ f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
464
+ )
465
+
466
+ dummy_data_guidance_print += 83 * "=" + "\n"
467
+
468
+ print(dummy_data_guidance_print)
llmeval-env/lib/python3.10/site-packages/datasets/commands/env.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import platform
2
+ from argparse import ArgumentParser
3
+
4
+ import fsspec
5
+ import huggingface_hub
6
+ import pandas
7
+ import pyarrow
8
+
9
+ from datasets import __version__ as version
10
+ from datasets.commands import BaseDatasetsCLICommand
11
+
12
+
13
+ def info_command_factory(_):
14
+ return EnvironmentCommand()
15
+
16
+
17
+ class EnvironmentCommand(BaseDatasetsCLICommand):
18
+ @staticmethod
19
+ def register_subcommand(parser: ArgumentParser):
20
+ download_parser = parser.add_parser("env", help="Print relevant system environment info.")
21
+ download_parser.set_defaults(func=info_command_factory)
22
+
23
+ def run(self):
24
+ info = {
25
+ "`datasets` version": version,
26
+ "Platform": platform.platform(),
27
+ "Python version": platform.python_version(),
28
+ "`huggingface_hub` version": huggingface_hub.__version__,
29
+ "PyArrow version": pyarrow.__version__,
30
+ "Pandas version": pandas.__version__,
31
+ "`fsspec` version": fsspec.__version__,
32
+ }
33
+
34
+ print("\nCopy-and-paste the text below in your GitHub issue.\n")
35
+ print(self.format_dict(info))
36
+
37
+ return info
38
+
39
+ @staticmethod
40
+ def format_dict(d):
41
+ return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
llmeval-env/lib/python3.10/site-packages/datasets/commands/run_beam.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+ from shutil import copyfile
5
+ from typing import List
6
+
7
+ from datasets import config
8
+ from datasets.builder import DatasetBuilder
9
+ from datasets.commands import BaseDatasetsCLICommand
10
+ from datasets.download.download_config import DownloadConfig
11
+ from datasets.download.download_manager import DownloadMode
12
+ from datasets.load import dataset_module_factory, import_main_class
13
+ from datasets.utils.deprecation_utils import deprecated
14
+ from datasets.utils.info_utils import VerificationMode
15
+
16
+
17
+ def run_beam_command_factory(args, **kwargs):
18
+ return RunBeamCommand(
19
+ args.dataset,
20
+ args.name,
21
+ args.cache_dir,
22
+ args.beam_pipeline_options,
23
+ args.data_dir,
24
+ args.all_configs,
25
+ args.save_info or args.save_infos,
26
+ args.ignore_verifications,
27
+ args.force_redownload,
28
+ **kwargs,
29
+ )
30
+
31
+
32
+ @deprecated(
33
+ "`BeamBasedBuilder` and `datasets-cli run_beam` are deprecated and will be removed in v3.0.0. Please use `GeneratorBasedBuilder` or `ArrowBasedBuilder` instead."
34
+ )
35
+ class RunBeamCommand(BaseDatasetsCLICommand):
36
+ @staticmethod
37
+ def register_subcommand(parser: ArgumentParser):
38
+ run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
39
+ run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
40
+ run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
41
+ run_beam_parser.add_argument(
42
+ "--cache_dir",
43
+ type=str,
44
+ default=None,
45
+ help="Cache directory where the datasets are stored",
46
+ )
47
+ run_beam_parser.add_argument(
48
+ "--beam_pipeline_options",
49
+ type=str,
50
+ default="",
51
+ help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`",
52
+ )
53
+ run_beam_parser.add_argument(
54
+ "--data_dir",
55
+ type=str,
56
+ default=None,
57
+ help="Can be used to specify a manual directory to get the files from",
58
+ )
59
+ run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
60
+ run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
61
+ run_beam_parser.add_argument(
62
+ "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
63
+ )
64
+ run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
65
+ # aliases
66
+ run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
67
+ run_beam_parser.set_defaults(func=run_beam_command_factory)
68
+
69
+ def __init__(
70
+ self,
71
+ dataset: str,
72
+ name: str,
73
+ cache_dir: str,
74
+ beam_pipeline_options: str,
75
+ data_dir: str,
76
+ all_configs: bool,
77
+ save_infos: bool,
78
+ ignore_verifications: bool,
79
+ force_redownload: bool,
80
+ **config_kwargs,
81
+ ):
82
+ self._dataset = dataset
83
+ self._name = name
84
+ self._cache_dir = cache_dir
85
+ self._beam_pipeline_options = beam_pipeline_options
86
+ self._data_dir = data_dir
87
+ self._all_configs = all_configs
88
+ self._save_infos = save_infos
89
+ self._ignore_verifications = ignore_verifications
90
+ self._force_redownload = force_redownload
91
+ self._config_kwargs = config_kwargs
92
+
93
+ def run(self):
94
+ import apache_beam as beam
95
+
96
+ if self._name is not None and self._all_configs:
97
+ print("Both parameters `name` and `all_configs` can't be used at once.")
98
+ exit(1)
99
+ path, config_name = self._dataset, self._name
100
+ dataset_module = dataset_module_factory(path)
101
+ builder_cls = import_main_class(dataset_module.module_path)
102
+ builders: List[DatasetBuilder] = []
103
+ if self._beam_pipeline_options:
104
+ beam_options = beam.options.pipeline_options.PipelineOptions(
105
+ flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
106
+ )
107
+ else:
108
+ beam_options = None
109
+ if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
110
+ for builder_config in builder_cls.BUILDER_CONFIGS:
111
+ builders.append(
112
+ builder_cls(
113
+ config_name=builder_config.name,
114
+ data_dir=self._data_dir,
115
+ hash=dataset_module.hash,
116
+ beam_options=beam_options,
117
+ cache_dir=self._cache_dir,
118
+ base_path=dataset_module.builder_kwargs.get("base_path"),
119
+ )
120
+ )
121
+ else:
122
+ builders.append(
123
+ builder_cls(
124
+ config_name=config_name,
125
+ data_dir=self._data_dir,
126
+ beam_options=beam_options,
127
+ cache_dir=self._cache_dir,
128
+ base_path=dataset_module.builder_kwargs.get("base_path"),
129
+ **self._config_kwargs,
130
+ )
131
+ )
132
+
133
+ for builder in builders:
134
+ builder.download_and_prepare(
135
+ download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
136
+ if not self._force_redownload
137
+ else DownloadMode.FORCE_REDOWNLOAD,
138
+ download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
139
+ verification_mode=VerificationMode.NO_CHECKS
140
+ if self._ignore_verifications
141
+ else VerificationMode.ALL_CHECKS,
142
+ )
143
+ if self._save_infos:
144
+ builder._save_infos()
145
+
146
+ print("Apache beam run successful.")
147
+
148
+ # If save_infos=True, the dataset infos file is created next to the loaded module file.
149
+ # Let's move it to the original directory of the dataset script, to allow the user to
150
+ # upload them on S3 at the same time afterwards.
151
+ if self._save_infos:
152
+ dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)
153
+
154
+ name = Path(path).name + ".py"
155
+
156
+ combined_path = os.path.join(path, name)
157
+ if os.path.isfile(path):
158
+ dataset_dir = os.path.dirname(path)
159
+ elif os.path.isfile(combined_path):
160
+ dataset_dir = path
161
+ else: # in case of a remote dataset
162
+ print(f"Dataset Infos file saved at {dataset_infos_path}")
163
+ exit(1)
164
+
165
+ # Move datasetinfo back to the user
166
+ user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
167
+ copyfile(dataset_infos_path, user_dataset_infos_path)
168
+ print(f"Dataset Infos file saved at {user_dataset_infos_path}")
llmeval-env/lib/python3.10/site-packages/datasets/commands/test.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from argparse import ArgumentParser
4
+ from pathlib import Path
5
+ from shutil import copyfile, rmtree
6
+ from typing import Generator
7
+
8
+ import datasets.config
9
+ from datasets.builder import DatasetBuilder
10
+ from datasets.commands import BaseDatasetsCLICommand
11
+ from datasets.download.download_manager import DownloadMode
12
+ from datasets.load import dataset_module_factory, import_main_class
13
+ from datasets.utils.info_utils import VerificationMode
14
+ from datasets.utils.logging import ERROR, get_logger
15
+
16
+
17
+ logger = get_logger(__name__)
18
+
19
+
20
+ def _test_command_factory(args):
21
+ return TestCommand(
22
+ args.dataset,
23
+ args.name,
24
+ args.cache_dir,
25
+ args.data_dir,
26
+ args.all_configs,
27
+ args.save_info or args.save_infos,
28
+ args.ignore_verifications,
29
+ args.force_redownload,
30
+ args.clear_cache,
31
+ args.num_proc,
32
+ )
33
+
34
+
35
+ class TestCommand(BaseDatasetsCLICommand):
36
+ __test__ = False # to tell pytest it's not a test class
37
+
38
+ @staticmethod
39
+ def register_subcommand(parser: ArgumentParser):
40
+ test_parser = parser.add_parser("test", help="Test dataset implementation.")
41
+ test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
42
+ test_parser.add_argument(
43
+ "--cache_dir",
44
+ type=str,
45
+ default=None,
46
+ help="Cache directory where the datasets are stored.",
47
+ )
48
+ test_parser.add_argument(
49
+ "--data_dir",
50
+ type=str,
51
+ default=None,
52
+ help="Can be used to specify a manual directory to get the files from.",
53
+ )
54
+ test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
55
+ test_parser.add_argument(
56
+ "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
57
+ )
58
+ test_parser.add_argument(
59
+ "--ignore_verifications",
60
+ action="store_true",
61
+ help="Run the test without checksums and splits checks.",
62
+ )
63
+ test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
64
+ test_parser.add_argument(
65
+ "--clear_cache",
66
+ action="store_true",
67
+ help="Remove downloaded files and cached datasets after each config test",
68
+ )
69
+ test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes")
70
+ # aliases
71
+ test_parser.add_argument("--save_infos", action="store_true", help="alias for --save_info")
72
+ test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
73
+ test_parser.set_defaults(func=_test_command_factory)
74
+
75
+ def __init__(
76
+ self,
77
+ dataset: str,
78
+ name: str,
79
+ cache_dir: str,
80
+ data_dir: str,
81
+ all_configs: bool,
82
+ save_infos: bool,
83
+ ignore_verifications: bool,
84
+ force_redownload: bool,
85
+ clear_cache: bool,
86
+ num_proc: int,
87
+ ):
88
+ self._dataset = dataset
89
+ self._name = name
90
+ self._cache_dir = cache_dir
91
+ self._data_dir = data_dir
92
+ self._all_configs = all_configs
93
+ self._save_infos = save_infos
94
+ self._ignore_verifications = ignore_verifications
95
+ self._force_redownload = force_redownload
96
+ self._clear_cache = clear_cache
97
+ self._num_proc = num_proc
98
+ if clear_cache and not cache_dir:
99
+ print(
100
+ "When --clear_cache is used, specifying a cache directory is mandatory.\n"
101
+ "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
102
+ "Please provide a --cache_dir that will be used to test the dataset script."
103
+ )
104
+ exit(1)
105
+ if save_infos:
106
+ self._ignore_verifications = True
107
+
108
+ def run(self):
109
+ logging.getLogger("filelock").setLevel(ERROR)
110
+ if self._name is not None and self._all_configs:
111
+ print("The `--name` and `--all_configs` parameters can't be used at once.")
112
+ exit(1)
113
+ path, config_name = self._dataset, self._name
114
+ module = dataset_module_factory(path)
115
+ builder_cls = import_main_class(module.module_path)
116
+ n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
117
+
118
+ def get_builders() -> Generator[DatasetBuilder, None, None]:
119
+ if self._all_configs and builder_cls.BUILDER_CONFIGS:
120
+ for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
121
+ if "config_name" in module.builder_kwargs:
122
+ yield builder_cls(
123
+ cache_dir=self._cache_dir,
124
+ data_dir=self._data_dir,
125
+ **module.builder_kwargs,
126
+ )
127
+ else:
128
+ yield builder_cls(
129
+ config_name=config.name,
130
+ cache_dir=self._cache_dir,
131
+ data_dir=self._data_dir,
132
+ **module.builder_kwargs,
133
+ )
134
+ else:
135
+ if "config_name" in module.builder_kwargs:
136
+ yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
137
+ else:
138
+ yield builder_cls(
139
+ config_name=config_name,
140
+ cache_dir=self._cache_dir,
141
+ data_dir=self._data_dir,
142
+ **module.builder_kwargs,
143
+ )
144
+
145
+ for j, builder in enumerate(get_builders()):
146
+ print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
147
+ builder._record_infos = os.path.exists(
148
+ os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
149
+ ) # record checksums only if we need to update a (deprecated) dataset_infos.json
150
+ builder.download_and_prepare(
151
+ download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
152
+ if not self._force_redownload
153
+ else DownloadMode.FORCE_REDOWNLOAD,
154
+ verification_mode=VerificationMode.NO_CHECKS
155
+ if self._ignore_verifications
156
+ else VerificationMode.ALL_CHECKS,
157
+ try_from_hf_gcs=False,
158
+ num_proc=self._num_proc,
159
+ )
160
+ builder.as_dataset()
161
+ if self._save_infos:
162
+ builder._save_infos()
163
+
164
+ # If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
165
+ # The dataset_infos are saved in the YAML part of the README.md
166
+
167
+ # Let's move it to the original directory of the dataset script, to allow the user to
168
+ # upload them on S3 at the same time afterwards.
169
+ if self._save_infos:
170
+ dataset_readme_path = os.path.join(
171
+ builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
172
+ )
173
+ name = Path(path).name + ".py"
174
+ combined_path = os.path.join(path, name)
175
+ if os.path.isfile(path):
176
+ dataset_dir = os.path.dirname(path)
177
+ elif os.path.isfile(combined_path):
178
+ dataset_dir = path
179
+ elif os.path.isdir(path): # for local directories containing only data files
180
+ dataset_dir = path
181
+ else: # in case of a remote dataset
182
+ dataset_dir = None
183
+ print(f"Dataset card saved at {dataset_readme_path}")
184
+
185
+ # Move dataset_info back to the user
186
+ if dataset_dir is not None:
187
+ user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
188
+ copyfile(dataset_readme_path, user_dataset_readme_path)
189
+ print(f"Dataset card saved at {user_dataset_readme_path}")
190
+
191
+ # If clear_cache=True, the download folder and the dataset builder cache directory are deleted
192
+ if self._clear_cache:
193
+ if os.path.isdir(builder._cache_dir):
194
+ logger.warning(f"Clearing cache at {builder._cache_dir}")
195
+ rmtree(builder._cache_dir)
196
+ download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
197
+ if os.path.isdir(download_dir):
198
+ logger.warning(f"Clearing cache at {download_dir}")
199
+ rmtree(download_dir)
200
+
201
+ print("Test successful.")
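
The `test` command above exercises a dataset builder end to end: it resolves the dataset module, instantiates one builder per requested config, runs `download_and_prepare`, and optionally writes the dataset card back next to the user's script. A minimal sketch of the same flow done directly in Python follows; the script path `my_dataset.py` and the cache directory are placeholders, and only helpers already imported by `test.py` are used.

```py
# Rough equivalent of `datasets-cli test my_dataset.py --cache_dir ./hf_cache`
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode

module = dataset_module_factory("my_dataset.py")  # placeholder dataset script
builder_cls = import_main_class(module.module_path)
builder = builder_cls(cache_dir="./hf_cache", **module.builder_kwargs)

# Same defaults as the CLI without --force_redownload / --ignore_verifications
builder.download_and_prepare(
    download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS,
    verification_mode=VerificationMode.ALL_CHECKS,
)
print(builder.as_dataset())
```
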
llmeval-env/lib/python3.10/site-packages/datasets/data_files.py ADDED
@@ -0,0 +1,821 @@
1
+ import os
2
+ import re
3
+ from functools import partial
4
+ from glob import has_magic
5
+ from pathlib import Path, PurePath
6
+ from typing import Callable, Dict, List, Optional, Set, Tuple, Union
7
+
8
+ import huggingface_hub
9
+ from fsspec.core import url_to_fs
10
+ from fsspec.implementations.http import HTTPFileSystem
11
+ from huggingface_hub import HfFileSystem
12
+ from packaging import version
13
+ from tqdm.contrib.concurrent import thread_map
14
+
15
+ from . import config
16
+ from .download import DownloadConfig
17
+ from .naming import _split_re
18
+ from .splits import Split
19
+ from .utils import logging
20
+ from .utils import tqdm as hf_tqdm
21
+ from .utils.file_utils import _prepare_path_and_storage_options, is_local_path, is_relative_path, xbasename, xjoin
22
+ from .utils.py_utils import glob_pattern_to_regex, string_to_dict
23
+
24
+
25
+ SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ class Url(str):
32
+ pass
33
+
34
+
35
+ class EmptyDatasetError(FileNotFoundError):
36
+ pass
37
+
38
+
39
+ SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
40
+
41
+ SPLIT_KEYWORDS = {
42
+ Split.TRAIN: ["train", "training"],
43
+ Split.VALIDATION: ["validation", "valid", "dev", "val"],
44
+ Split.TEST: ["test", "testing", "eval", "evaluation"],
45
+ }
46
+ NON_WORDS_CHARS = "-._ 0-9"
47
+ if config.FSSPEC_VERSION < version.parse("2023.9.0"):
48
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
49
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
50
+ "{keyword}/**",
51
+ "{keyword}[{sep}]*/**",
52
+ "**[{sep}/]{keyword}/**",
53
+ "**[{sep}/]{keyword}[{sep}]*/**",
54
+ ]
55
+ elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
56
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/*[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
57
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
58
+ "{keyword}/**/*",
59
+ "{keyword}[{sep}]*/**/*",
60
+ "**/*[{sep}/]{keyword}/**/*",
61
+ "**/*[{sep}/]{keyword}[{sep}]*/**/*",
62
+ ]
63
+ else:
64
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/{keyword}[{sep}]*", "**/*[{sep}]{keyword}[{sep}]*"]
65
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
66
+ "**/{keyword}/**",
67
+ "**/{keyword}[{sep}]*/**",
68
+ "**/*[{sep}]{keyword}/**",
69
+ "**/*[{sep}]{keyword}[{sep}]*/**",
70
+ ]
71
+
72
+ DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
73
+ DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {
74
+ split: [
75
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
76
+ for keyword in SPLIT_KEYWORDS[split]
77
+ for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS
78
+ ]
79
+ for split in DEFAULT_SPLITS
80
+ }
81
+ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {
82
+ split: [
83
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
84
+ for keyword in SPLIT_KEYWORDS[split]
85
+ for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS
86
+ ]
87
+ for split in DEFAULT_SPLITS
88
+ }
89
+
90
+
91
+ DEFAULT_PATTERNS_ALL = {
92
+ Split.TRAIN: ["**"],
93
+ }
94
+
95
+ ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
96
+ ALL_DEFAULT_PATTERNS = [
97
+ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME,
98
+ DEFAULT_PATTERNS_SPLIT_IN_FILENAME,
99
+ DEFAULT_PATTERNS_ALL,
100
+ ]
101
+ if config.FSSPEC_VERSION < version.parse("2023.9.0"):
102
+ METADATA_PATTERNS = [
103
+ "metadata.csv",
104
+ "**/metadata.csv",
105
+ "metadata.jsonl",
106
+ "**/metadata.jsonl",
107
+ ] # metadata file for ImageFolder and AudioFolder
108
+ else:
109
+ METADATA_PATTERNS = [
110
+ "**/metadata.csv",
111
+ "**/metadata.jsonl",
112
+ ] # metadata file for ImageFolder and AudioFolder
113
+ WILDCARD_CHARACTERS = "*[]"
114
+ FILES_TO_IGNORE = [
115
+ "README.md",
116
+ "config.json",
117
+ "dataset_info.json",
118
+ "dataset_infos.json",
119
+ "dummy_data.zip",
120
+ "dataset_dict.json",
121
+ ]
122
+
123
+
124
+ def contains_wildcards(pattern: str) -> bool:
125
+ return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
126
+
127
+
128
+ def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
129
+ """
130
+ Take the data_files patterns from the user, and format them into a dictionary.
131
+ Each key is the name of the split, and each value is a list of data files patterns (paths or urls).
132
+ The default split is "train".
133
+
134
+ Returns:
135
+ patterns: dictionary of split_name -> list of patterns
136
+ """
137
+ if isinstance(patterns, dict):
138
+ return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
139
+ elif isinstance(patterns, str):
140
+ return {SANITIZED_DEFAULT_SPLIT: [patterns]}
141
+ elif isinstance(patterns, list):
142
+ if any(isinstance(pattern, dict) for pattern in patterns):
143
+ for pattern in patterns:
144
+ if not (
145
+ isinstance(pattern, dict)
146
+ and len(pattern) == 2
147
+ and "split" in pattern
148
+ and isinstance(pattern.get("path"), (str, list))
149
+ ):
150
+ raise ValueError(
151
+ f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
152
+ )
153
+ splits = [pattern["split"] for pattern in patterns]
154
+ if len(set(splits)) != len(splits):
155
+ raise ValueError(f"Some splits are duplicated in data_files: {splits}")
156
+ return {
157
+ str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
158
+ for pattern in patterns
159
+ }
160
+ else:
161
+ return {SANITIZED_DEFAULT_SPLIT: patterns}
162
+ else:
163
+ return sanitize_patterns(list(patterns))
164
+
165
+
166
+ def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
167
+ """
168
+ When a path matches a pattern, we additionally check if it's inside a special directory
169
+ we ignore by default (if it starts with a double underscore).
170
+
171
+ Users can still explicitly request a filepath inside such a directory if "__pycache__" is
172
+ mentioned explicitly in the requested pattern.
173
+
174
+ Some examples:
175
+
176
+ base directory:
177
+
178
+ ./
179
+ └── __pycache__
180
+ └── b.txt
181
+
182
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
183
+ True
184
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
185
+ True
186
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
187
+ False
188
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
189
+ False
190
+ """
191
+ # We just need to check that every special directory in the path is explicitly present in the pattern.
192
+ # Since we assume that the path matches the pattern, it's equivalent to checking that both
193
+ # the parent path and the parent pattern have the same number of special directories.
194
+ data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
195
+ data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
196
+ return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
197
+
198
+
199
+ def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
200
+ """
201
+ When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
202
+ a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
203
+
204
+ Users can still explicitly request a filepath that is hidden or is inside a hidden directory
205
+ if the hidden part is mentioned explicitly in the requested pattern.
206
+
207
+ Some examples:
208
+
209
+ base directory:
210
+
211
+ ./
212
+ └── .hidden_file.txt
213
+
214
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
215
+ True
216
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
217
+ False
218
+
219
+ base directory:
220
+
221
+ ./
222
+ └── .hidden_dir
223
+ └── a.txt
224
+
225
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
226
+ True
227
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
228
+ False
229
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
230
+ False
231
+
232
+ base directory:
233
+
234
+ ./
235
+ └── .hidden_dir
236
+ └── .hidden_file.txt
237
+
238
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
239
+ True
240
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
241
+ True
242
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
243
+ False
244
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
245
+ True
246
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
247
+ False
248
+ """
249
+ # We just need to check that every hidden part of the path is explicitly present in the pattern.
250
+ # Since we assume that the path matches the pattern, it's equivalent to checking that both
251
+ # the path and the pattern have the same number of hidden parts.
252
+ hidden_directories_in_path = [
253
+ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
254
+ ]
255
+ hidden_directories_in_pattern = [
256
+ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
257
+ ]
258
+ return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
259
+
260
+
261
+ def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
262
+ """
263
+ Get the default pattern from a directory or repository by testing all the supported patterns.
264
+ The first set of patterns that returns a non-empty list of data files is returned.
265
+
266
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
267
+ """
268
+ # first check the split patterns like data/{split}-00000-of-00001.parquet
269
+ for split_pattern in ALL_SPLIT_PATTERNS:
270
+ pattern = split_pattern.replace("{split}", "*")
271
+ try:
272
+ data_files = pattern_resolver(pattern)
273
+ except FileNotFoundError:
274
+ continue
275
+ if len(data_files) > 0:
276
+ splits: Set[str] = {
277
+ string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
278
+ for p in data_files
279
+ }
280
+ if any(not re.match(_split_re, split) for split in splits):
281
+ raise ValueError(f"Split name should match '{_split_re}' but got '{splits}'.")
282
+ sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
283
+ splits - set(DEFAULT_SPLITS)
284
+ )
285
+ return {split: [split_pattern.format(split=split)] for split in sorted_splits}
286
+ # then check the default patterns based on train/valid/test splits
287
+ for patterns_dict in ALL_DEFAULT_PATTERNS:
288
+ non_empty_splits = []
289
+ for split, patterns in patterns_dict.items():
290
+ for pattern in patterns:
291
+ try:
292
+ data_files = pattern_resolver(pattern)
293
+ except FileNotFoundError:
294
+ continue
295
+ if len(data_files) > 0:
296
+ non_empty_splits.append(split)
297
+ break
298
+ if non_empty_splits:
299
+ return {split: patterns_dict[split] for split in non_empty_splits}
300
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
301
+
302
+
303
+ def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
304
+ """
305
+ Get the supported metadata patterns from a directory or repository.
306
+ """
307
+ non_empty_patterns = []
308
+ for pattern in METADATA_PATTERNS:
309
+ try:
310
+ metadata_files = pattern_resolver(pattern)
311
+ if len(metadata_files) > 0:
312
+ non_empty_patterns.append(pattern)
313
+ except FileNotFoundError:
314
+ pass
315
+ if non_empty_patterns:
316
+ return non_empty_patterns
317
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
318
+
319
+
320
+ def resolve_pattern(
321
+ pattern: str,
322
+ base_path: str,
323
+ allowed_extensions: Optional[List[str]] = None,
324
+ download_config: Optional[DownloadConfig] = None,
325
+ ) -> List[str]:
326
+ """
327
+ Resolve the paths and URLs of the data files from the pattern passed by the user.
328
+
329
+ You can use patterns to resolve multiple local files. Here are a few examples:
330
+ - *.csv to match all the CSV files at the first level
331
+ - **.csv to match all the CSV files at any level
332
+ - data/* to match all the files inside "data"
333
+ - data/** to match all the files inside "data" and its subdirectories
334
+
335
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
336
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
337
+ other than a forward slash /.
338
+
339
+ More generally:
340
+ - '*' matches any character except a forward-slash (to match just the file or directory name)
341
+ - '**' matches any character including a forward-slash /
342
+
343
+ Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
344
+ The same applies to special directories that start with a double underscore like "__pycache__".
345
+ You can still include one if the pattern explicitly mentions it:
346
+ - to include a hidden file: "*/.hidden.txt" or "*/.*"
347
+ - to include a hidden directory: ".hidden/*" or ".*/*"
348
+ - to include a special directory: "__special__/*" or "__*/*"
349
+
350
+ Example::
351
+
352
+ >>> from datasets.data_files import resolve_pattern
353
+ >>> base_path = "."
354
+ >>> resolve_pattern("docs/**/*.py", base_path)
355
+ ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
356
+
357
+ Args:
358
+ pattern (str): Unix pattern or paths or URLs of the data files to resolve.
359
+ The paths can be absolute or relative to base_path.
360
+ Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
361
+ base_path (str): Base path to use when resolving relative paths.
362
+ allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
363
+ For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
364
+ Returns:
365
+ List[str]: List of paths or URLs to the local or remote files that match the patterns.
366
+ """
367
+ if is_relative_path(pattern):
368
+ pattern = xjoin(base_path, pattern)
369
+ elif is_local_path(pattern):
370
+ base_path = os.path.splitdrive(pattern)[0] + os.sep
371
+ else:
372
+ base_path = ""
373
+ pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
374
+ fs, fs_pattern = url_to_fs(pattern, **storage_options)
375
+ files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
376
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
377
+ protocol_prefix = protocol + "://" if protocol != "file" else ""
378
+ glob_kwargs = {}
379
+ if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
380
+ # 10 times faster glob with detail=True (ignores costly info like lastCommit)
381
+ glob_kwargs["expand_info"] = False
382
+ matched_paths = [
383
+ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
384
+ for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
385
+ if info["type"] == "file"
386
+ and (xbasename(filepath) not in files_to_ignore)
387
+ and not _is_inside_unrequested_special_dir(filepath, fs_pattern)
388
+ and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(filepath, fs_pattern)
389
+ ] # ignore .ipynb and __pycache__, but keep /../
390
+ if allowed_extensions is not None:
391
+ out = [
392
+ filepath
393
+ for filepath in matched_paths
394
+ if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
395
+ ]
396
+ if len(out) < len(matched_paths):
397
+ invalid_matched_files = list(set(matched_paths) - set(out))
398
+ logger.info(
399
+ f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
400
+ )
401
+ else:
402
+ out = matched_paths
403
+ if not out:
404
+ error_msg = f"Unable to find '{pattern}'"
405
+ if allowed_extensions is not None:
406
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
407
+ raise FileNotFoundError(error_msg)
408
+ return out
409
+
410
+
411
+ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
412
+ """
413
+ Get the default pattern from a directory testing all the supported patterns.
414
+ The first set of patterns that returns a non-empty list of data files is returned.
415
+
416
+ Some examples of supported patterns:
417
+
418
+ Input:
419
+
420
+ my_dataset_repository/
421
+ ├── README.md
422
+ └── dataset.csv
423
+
424
+ Output:
425
+
426
+ {'train': ['**']}
427
+
428
+ Input:
429
+
430
+ my_dataset_repository/
431
+ ├── README.md
432
+ ├── train.csv
433
+ └── test.csv
434
+
435
+ my_dataset_repository/
436
+ ├── README.md
437
+ └── data/
438
+ ├── train.csv
439
+ └── test.csv
440
+
441
+ my_dataset_repository/
442
+ ├── README.md
443
+ ├── train_0.csv
444
+ ├── train_1.csv
445
+ ├── train_2.csv
446
+ ├── train_3.csv
447
+ ├── test_0.csv
448
+ └── test_1.csv
449
+
450
+ Output:
451
+
452
+ {'train': ['**/train[-._ 0-9]*', '**/*[-._ 0-9]train[-._ 0-9]*', '**/training[-._ 0-9]*', '**/*[-._ 0-9]training[-._ 0-9]*'],
453
+ 'test': ['**/test[-._ 0-9]*', '**/*[-._ 0-9]test[-._ 0-9]*', '**/testing[-._ 0-9]*', '**/*[-._ 0-9]testing[-._ 0-9]*', ...]}
454
+
455
+ Input:
456
+
457
+ my_dataset_repository/
458
+ ├── README.md
459
+ └── data/
460
+ ├── train/
461
+ │ ├── shard_0.csv
462
+ │ ├── shard_1.csv
463
+ │ ├── shard_2.csv
464
+ │ └── shard_3.csv
465
+ └── test/
466
+ ├── shard_0.csv
467
+ └── shard_1.csv
468
+
469
+ Output:
470
+
471
+ {'train': ['**/train/**', '**/train[-._ 0-9]*/**', '**/*[-._ 0-9]train/**', '**/*[-._ 0-9]train[-._ 0-9]*/**', ...],
472
+ 'test': ['**/test/**', '**/test[-._ 0-9]*/**', '**/*[-._ 0-9]test/**', '**/*[-._ 0-9]test[-._ 0-9]*/**', ...]}
473
+
474
+ Input:
475
+
476
+ my_dataset_repository/
477
+ ├── README.md
478
+ └── data/
479
+ ├── train-00000-of-00003.csv
480
+ ├── train-00001-of-00003.csv
481
+ ├── train-00002-of-00003.csv
482
+ ├── test-00000-of-00001.csv
483
+ ├── random-00000-of-00003.csv
484
+ ├── random-00001-of-00003.csv
485
+ └── random-00002-of-00003.csv
486
+
487
+ Output:
488
+
489
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
490
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
491
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
492
+
493
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
494
+ """
495
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
496
+ try:
497
+ return _get_data_files_patterns(resolver)
498
+ except FileNotFoundError:
499
+ raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
500
+
501
+
502
+ def get_metadata_patterns(
503
+ base_path: str,
504
+ download_config: Optional[DownloadConfig] = None,
505
+ ) -> List[str]:
506
+ """
507
+ Get the supported metadata patterns from a local directory.
508
+ """
509
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
510
+ try:
511
+ return _get_metadata_files_patterns(resolver)
512
+ except FileNotFoundError:
513
+ raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
514
+
515
+
516
+ def _get_single_origin_metadata(
517
+ data_file: str,
518
+ download_config: Optional[DownloadConfig] = None,
519
+ ) -> Tuple[str]:
520
+ data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
521
+ fs, *_ = url_to_fs(data_file, **storage_options)
522
+ if isinstance(fs, HfFileSystem):
523
+ resolved_path = fs.resolve_path(data_file)
524
+ return (resolved_path.repo_id, resolved_path.revision)
525
+ elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
526
+ hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
527
+ data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
528
+ resolved_path = hffs.resolve_path(data_file)
529
+ return (resolved_path.repo_id, resolved_path.revision)
530
+ info = fs.info(data_file)
531
+ # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
532
+ for key in ["ETag", "etag", "mtime"]:
533
+ if key in info:
534
+ return (str(info[key]),)
535
+ return ()
536
+
537
+
538
+ def _get_origin_metadata(
539
+ data_files: List[str],
540
+ download_config: Optional[DownloadConfig] = None,
541
+ max_workers: Optional[int] = None,
542
+ ) -> Tuple[str]:
543
+ max_workers = max_workers if max_workers is not None else config.HF_DATASETS_MULTITHREADING_MAX_WORKERS
544
+ return thread_map(
545
+ partial(_get_single_origin_metadata, download_config=download_config),
546
+ data_files,
547
+ max_workers=max_workers,
548
+ tqdm_class=hf_tqdm,
549
+ desc="Resolving data files",
550
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
551
+ disable=len(data_files) <= 16 or None,
552
+ )
553
+
554
+
555
+ class DataFilesList(List[str]):
556
+ """
557
+ List of data files (absolute local paths or URLs).
558
+ It has two construction methods given the user's data files patterns:
559
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
560
+ - ``from_local_or_remote``: resolve patterns from a local path
561
+
562
+ Moreover DataFilesList has an additional attribute ``origin_metadata``.
563
+ It can store:
564
+ - the last modified time of local files
565
+ - ETag of remote files
566
+ - commit sha of a dataset repository
567
+
568
+ Thanks to this additional attribute, it is possible to hash the list
569
+ and get a different hash if and only if at least one file changed.
570
+ This is useful for caching Dataset objects that are obtained from a list of data files.
571
+ """
572
+
573
+ def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
574
+ super().__init__(data_files)
575
+ self.origin_metadata = origin_metadata
576
+
577
+ def __add__(self, other):
578
+ return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
579
+
580
+ @classmethod
581
+ def from_hf_repo(
582
+ cls,
583
+ patterns: List[str],
584
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
585
+ base_path: Optional[str] = None,
586
+ allowed_extensions: Optional[List[str]] = None,
587
+ download_config: Optional[DownloadConfig] = None,
588
+ ) -> "DataFilesList":
589
+ base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
590
+ return cls.from_patterns(
591
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
592
+ )
593
+
594
+ @classmethod
595
+ def from_local_or_remote(
596
+ cls,
597
+ patterns: List[str],
598
+ base_path: Optional[str] = None,
599
+ allowed_extensions: Optional[List[str]] = None,
600
+ download_config: Optional[DownloadConfig] = None,
601
+ ) -> "DataFilesList":
602
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
603
+ return cls.from_patterns(
604
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
605
+ )
606
+
607
+ @classmethod
608
+ def from_patterns(
609
+ cls,
610
+ patterns: List[str],
611
+ base_path: Optional[str] = None,
612
+ allowed_extensions: Optional[List[str]] = None,
613
+ download_config: Optional[DownloadConfig] = None,
614
+ ) -> "DataFilesList":
615
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
616
+ data_files = []
617
+ for pattern in patterns:
618
+ try:
619
+ data_files.extend(
620
+ resolve_pattern(
621
+ pattern,
622
+ base_path=base_path,
623
+ allowed_extensions=allowed_extensions,
624
+ download_config=download_config,
625
+ )
626
+ )
627
+ except FileNotFoundError:
628
+ if not has_magic(pattern):
629
+ raise
630
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
631
+ return cls(data_files, origin_metadata)
632
+
633
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
634
+ pattern = "|".join("\\" + ext for ext in extensions)
635
+ pattern = re.compile(f".*({pattern})(\\..+)?$")
636
+ return DataFilesList(
637
+ [data_file for data_file in self if pattern.match(data_file)],
638
+ origin_metadata=self.origin_metadata,
639
+ )
640
+
641
+
642
+ class DataFilesDict(Dict[str, DataFilesList]):
643
+ """
644
+ Dict of split_name -> list of data files (absolute local paths or URLs).
645
+ It has two construction methods given the user's data files patterns:
646
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
647
+ - ``from_local_or_remote``: resolve patterns from a local path
648
+
649
+ Moreover each list is a DataFilesList. It is possible to hash the dictionary
650
+ and get a different hash if and only if at least one file changed.
651
+ For more info, see ``DataFilesList``.
652
+
653
+ This is useful for caching Dataset objects that are obtained from a list of data files.
654
+
655
+ Changing the order of the keys of this dictionary also doesn't change its hash.
656
+ """
657
+
658
+ @classmethod
659
+ def from_local_or_remote(
660
+ cls,
661
+ patterns: Dict[str, Union[List[str], DataFilesList]],
662
+ base_path: Optional[str] = None,
663
+ allowed_extensions: Optional[List[str]] = None,
664
+ download_config: Optional[DownloadConfig] = None,
665
+ ) -> "DataFilesDict":
666
+ out = cls()
667
+ for key, patterns_for_key in patterns.items():
668
+ out[key] = (
669
+ DataFilesList.from_local_or_remote(
670
+ patterns_for_key,
671
+ base_path=base_path,
672
+ allowed_extensions=allowed_extensions,
673
+ download_config=download_config,
674
+ )
675
+ if not isinstance(patterns_for_key, DataFilesList)
676
+ else patterns_for_key
677
+ )
678
+ return out
679
+
680
+ @classmethod
681
+ def from_hf_repo(
682
+ cls,
683
+ patterns: Dict[str, Union[List[str], DataFilesList]],
684
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
685
+ base_path: Optional[str] = None,
686
+ allowed_extensions: Optional[List[str]] = None,
687
+ download_config: Optional[DownloadConfig] = None,
688
+ ) -> "DataFilesDict":
689
+ out = cls()
690
+ for key, patterns_for_key in patterns.items():
691
+ out[key] = (
692
+ DataFilesList.from_hf_repo(
693
+ patterns_for_key,
694
+ dataset_info=dataset_info,
695
+ base_path=base_path,
696
+ allowed_extensions=allowed_extensions,
697
+ download_config=download_config,
698
+ )
699
+ if not isinstance(patterns_for_key, DataFilesList)
700
+ else patterns_for_key
701
+ )
702
+ return out
703
+
704
+ @classmethod
705
+ def from_patterns(
706
+ cls,
707
+ patterns: Dict[str, Union[List[str], DataFilesList]],
708
+ base_path: Optional[str] = None,
709
+ allowed_extensions: Optional[List[str]] = None,
710
+ download_config: Optional[DownloadConfig] = None,
711
+ ) -> "DataFilesDict":
712
+ out = cls()
713
+ for key, patterns_for_key in patterns.items():
714
+ out[key] = (
715
+ DataFilesList.from_patterns(
716
+ patterns_for_key,
717
+ base_path=base_path,
718
+ allowed_extensions=allowed_extensions,
719
+ download_config=download_config,
720
+ )
721
+ if not isinstance(patterns_for_key, DataFilesList)
722
+ else patterns_for_key
723
+ )
724
+ return out
725
+
726
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
727
+ out = type(self)()
728
+ for key, data_files_list in self.items():
729
+ out[key] = data_files_list.filter_extensions(extensions)
730
+ return out
731
+
732
+
733
+ class DataFilesPatternsList(List[str]):
734
+ """
735
+ List of data files patterns (absolute local paths or URLs).
736
+ For each pattern there should also be a list of allowed extensions
737
+ to keep, or None to keep all the files for the pattern.
738
+ """
739
+
740
+ def __init__(
741
+ self,
742
+ patterns: List[str],
743
+ allowed_extensions: List[Optional[List[str]]],
744
+ ):
745
+ super().__init__(patterns)
746
+ self.allowed_extensions = allowed_extensions
747
+
748
+ def __add__(self, other):
749
+ return DataFilesList([*self, *other], self.allowed_extensions + other.allowed_extensions)
750
+
751
+ @classmethod
752
+ def from_patterns(
753
+ cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
754
+ ) -> "DataFilesPatternsDict":
755
+ return cls(patterns, [allowed_extensions] * len(patterns))
756
+
757
+ def resolve(
758
+ self,
759
+ base_path: str,
760
+ download_config: Optional[DownloadConfig] = None,
761
+ ) -> "DataFilesList":
762
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
763
+ data_files = []
764
+ for pattern, allowed_extensions in zip(self, self.allowed_extensions):
765
+ try:
766
+ data_files.extend(
767
+ resolve_pattern(
768
+ pattern,
769
+ base_path=base_path,
770
+ allowed_extensions=allowed_extensions,
771
+ download_config=download_config,
772
+ )
773
+ )
774
+ except FileNotFoundError:
775
+ if not has_magic(pattern):
776
+ raise
777
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
778
+ return DataFilesList(data_files, origin_metadata)
779
+
780
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
781
+ return DataFilesPatternsList(
782
+ self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
783
+ )
784
+
785
+
786
+ class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
787
+ """
788
+ Dict of split_name -> list of data files patterns (absolute local paths or URLs).
789
+ """
790
+
791
+ @classmethod
792
+ def from_patterns(
793
+ cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
794
+ ) -> "DataFilesPatternsDict":
795
+ out = cls()
796
+ for key, patterns_for_key in patterns.items():
797
+ out[key] = (
798
+ DataFilesPatternsList.from_patterns(
799
+ patterns_for_key,
800
+ allowed_extensions=allowed_extensions,
801
+ )
802
+ if not isinstance(patterns_for_key, DataFilesPatternsList)
803
+ else patterns_for_key
804
+ )
805
+ return out
806
+
807
+ def resolve(
808
+ self,
809
+ base_path: str,
810
+ download_config: Optional[DownloadConfig] = None,
811
+ ) -> "DataFilesDict":
812
+ out = DataFilesDict()
813
+ for key, data_files_patterns_list in self.items():
814
+ out[key] = data_files_patterns_list.resolve(base_path, download_config)
815
+ return out
816
+
817
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
818
+ out = type(self)()
819
+ for key, data_files_patterns_list in self.items():
820
+ out[key] = data_files_patterns_list.filter_extensions(extensions)
821
+ return out
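
Taken together, the helpers in `data_files.py` turn a base path into a split-aware mapping of concrete files: `get_data_patterns` infers the default split patterns, `sanitize_patterns` normalizes user-provided `data_files` arguments, and `DataFilesDict.from_patterns` resolves everything into `DataFilesList` objects that carry origin metadata for caching. A small sketch, assuming a local directory `./my_data` (a placeholder) that contains data files:

```py
from datasets.data_files import DataFilesDict, get_data_patterns, sanitize_patterns

base_path = "./my_data"  # placeholder local dataset directory

# e.g. {'train': [...], 'test': [...]} depending on the file layout
patterns = sanitize_patterns(get_data_patterns(base_path))

data_files = DataFilesDict.from_patterns(patterns, base_path=base_path)
for split, files in data_files.items():
    print(split, len(files), "files")
```
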
llmeval-env/lib/python3.10/site-packages/datasets/download/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ __all__ = [
2
+ "DownloadConfig",
3
+ "DownloadManager",
4
+ "DownloadMode",
5
+ "StreamingDownloadManager",
6
+ ]
7
+
8
+ from .download_config import DownloadConfig
9
+ from .download_manager import DownloadManager, DownloadMode
10
+ from .streaming_download_manager import StreamingDownloadManager
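
The `__init__.py` above only re-exports the download machinery, so the four names in `__all__` are importable from `datasets.download` directly, for example:

```py
from datasets.download import DownloadConfig, DownloadManager, DownloadMode, StreamingDownloadManager

# REUSE_DATASET_IF_EXISTS is the default download mode described in download_manager.py below
print(DownloadMode.REUSE_DATASET_IF_EXISTS)
```
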
llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (439 Bytes).
llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc ADDED
Binary file (5.66 kB).
llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc ADDED
Binary file (15.3 kB).
llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc ADDED
Binary file (8.03 kB).
llmeval-env/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc ADDED
Binary file (7.45 kB).
llmeval-env/lib/python3.10/site-packages/datasets/download/download_config.py ADDED
@@ -0,0 +1,108 @@
1
+ import copy
2
+ import warnings
3
+ from dataclasses import InitVar, dataclass, field
4
+ from pathlib import Path
5
+ from typing import Any, Dict, Optional, Union
6
+
7
+ from .. import config
8
+
9
+
10
+ @dataclass
11
+ class DownloadConfig:
12
+ """Configuration for our cached path manager.
13
+
14
+ Attributes:
15
+ cache_dir (`str` or `Path`, *optional*):
16
+ Specify a cache directory to save the file to (overwrite the
17
+ default cache dir).
18
+ force_download (`bool`, defaults to `False`):
19
+ If `True`, re-download the file even if it's already cached in
20
+ the cache dir.
21
+ resume_download (`bool`, defaults to `False`):
22
+ If `True`, resume the download if an incompletely received file is
23
+ found.
24
+ proxies (`dict`, *optional*):
25
+ user_agent (`str`, *optional*):
26
+ Optional string or dict that will be appended to the user-agent on remote
27
+ requests.
28
+ extract_compressed_file (`bool`, defaults to `False`):
29
+ If `True` and the path points to a zip or tar file,
30
+ extract the compressed file in a folder along the archive.
31
+ force_extract (`bool`, defaults to `False`):
32
+ If `True` when `extract_compressed_file` is `True` and the archive
33
+ was already extracted, re-extract the archive and override the folder where it was extracted.
34
+ delete_extracted (`bool`, defaults to `False`):
35
+ Whether to delete (or keep) the extracted files.
36
+ extract_on_the_fly (`bool`, defaults to `False`):
37
+ If `True`, extract compressed files while they are being read.
38
+ use_etag (`bool`, defaults to `True`):
39
+ Whether to use the ETag HTTP response header to validate the cached files.
40
+ num_proc (`int`, *optional*):
41
+ The number of processes to launch to download the files in parallel.
42
+ max_retries (`int`, default to `1`):
43
+ The number of times to retry an HTTP request if it fails.
44
+ token (`str` or `bool`, *optional*):
45
+ Optional string or boolean to use as Bearer token
46
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
47
+ use_auth_token (`str` or `bool`, *optional*):
48
+ Optional string or boolean to use as Bearer token
49
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
50
+
51
+ <Deprecated version="2.14.0">
52
+
53
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
54
+
55
+ </Deprecated>
56
+
57
+ ignore_url_params (`bool`, defaults to `False`):
58
+ Whether to strip all query parameters and fragments from
59
+ the download URL before using it for caching the file.
60
+ storage_options (`dict`, *optional*):
61
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
62
+ download_desc (`str`, *optional*):
63
+ A description to be displayed alongside the progress bar while downloading the files.
64
+ disable_tqdm (`bool`, defaults to `False`):
65
+ Whether to disable the individual files download progress bar
66
+ """
67
+
68
+ cache_dir: Optional[Union[str, Path]] = None
69
+ force_download: bool = False
70
+ resume_download: bool = False
71
+ local_files_only: bool = False
72
+ proxies: Optional[Dict] = None
73
+ user_agent: Optional[str] = None
74
+ extract_compressed_file: bool = False
75
+ force_extract: bool = False
76
+ delete_extracted: bool = False
77
+ extract_on_the_fly: bool = False
78
+ use_etag: bool = True
79
+ num_proc: Optional[int] = None
80
+ max_retries: int = 1
81
+ token: Optional[Union[str, bool]] = None
82
+ use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
83
+ ignore_url_params: bool = False
84
+ storage_options: Dict[str, Any] = field(default_factory=dict)
85
+ download_desc: Optional[str] = None
86
+ disable_tqdm: bool = False
87
+
88
+ def __post_init__(self, use_auth_token):
89
+ if use_auth_token != "deprecated":
90
+ warnings.warn(
91
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
92
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
93
+ FutureWarning,
94
+ )
95
+ self.token = use_auth_token
96
+ if "hf" not in self.storage_options:
97
+ self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
98
+
99
+ def copy(self) -> "DownloadConfig":
100
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
101
+
102
+ def __setattr__(self, name, value):
103
+ if name == "token" and getattr(self, "storage_options", None) is not None:
104
+ if "hf" not in self.storage_options:
105
+ self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
106
+ elif getattr(self.storage_options["hf"], "token", None) is None:
107
+ self.storage_options["hf"]["token"] = value
108
+ super().__setattr__(name, value)
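
A short sketch of putting the `DownloadConfig` fields documented above to use; the cache directory and the retry/process counts are placeholder values:

```py
from datasets.download import DownloadConfig

download_config = DownloadConfig(
    cache_dir="./hf_cache",  # placeholder cache location
    max_retries=3,           # retry failed HTTP requests
    num_proc=4,              # download files in parallel
    resume_download=True,
)

# __post_init__ seeds storage_options["hf"] with the token and HF endpoint
print(download_config.storage_options["hf"]["endpoint"])

# copy() deep-copies every field, so per-call tweaks don't leak back
per_call_config = download_config.copy()
per_call_config.extract_on_the_fly = True
```
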
llmeval-env/lib/python3.10/site-packages/datasets/download/download_manager.py ADDED
@@ -0,0 +1,448 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Download manager interface."""
17
+
18
+ import enum
19
+ import io
20
+ import multiprocessing
21
+ import os
22
+ import posixpath
23
+ import warnings
24
+ from datetime import datetime
25
+ from functools import partial
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import fsspec
29
+ from fsspec.core import url_to_fs
30
+ from tqdm.contrib.concurrent import thread_map
31
+
32
+ from .. import config
33
+ from ..utils import tqdm as hf_tqdm
34
+ from ..utils.deprecation_utils import DeprecatedEnum, deprecated
35
+ from ..utils.file_utils import (
36
+ ArchiveIterable,
37
+ FilesIterable,
38
+ cached_path,
39
+ get_from_cache,
40
+ hash_url_to_filename,
41
+ is_relative_path,
42
+ stack_multiprocessing_download_progress_bars,
43
+ url_or_path_join,
44
+ )
45
+ from ..utils.info_utils import get_size_checksum_dict
46
+ from ..utils.logging import get_logger, tqdm
47
+ from ..utils.py_utils import NestedDataStructure, map_nested, size_str
48
+ from ..utils.track import tracked_str
49
+ from .download_config import DownloadConfig
50
+
51
+
52
+ logger = get_logger(__name__)
53
+
54
+
55
+ class DownloadMode(enum.Enum):
56
+ """`Enum` for how to treat pre-existing downloads and data.
57
+
58
+ The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
59
+ raw downloads and the prepared dataset if they exist.
60
+
61
+ The generation modes:
62
+
63
+ | | Downloads | Dataset |
64
+ |-------------------------------------|-----------|---------|
65
+ | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
66
+ | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
67
+ | `FORCE_REDOWNLOAD` | Fresh | Fresh |
68
+
69
+ """
70
+
71
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
72
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
73
+ FORCE_REDOWNLOAD = "force_redownload"
74
+
75
+
76
+ class GenerateMode(DeprecatedEnum):
77
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
78
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
79
+ FORCE_REDOWNLOAD = "force_redownload"
80
+
81
+ @property
82
+ def help_message(self):
83
+ return "Use 'DownloadMode' instead."
84
+
85
+
86
+ class DownloadManager:
87
+ is_streaming = False
88
+
89
+ def __init__(
90
+ self,
91
+ dataset_name: Optional[str] = None,
92
+ data_dir: Optional[str] = None,
93
+ download_config: Optional[DownloadConfig] = None,
94
+ base_path: Optional[str] = None,
95
+ record_checksums=True,
96
+ ):
97
+ """Download manager constructor.
98
+
99
+ Args:
100
+ data_dir:
101
+ can be used to specify a manual directory to get the files from.
102
+ dataset_name (`str`):
103
+ name of dataset this instance will be used for. If
104
+ provided, the downloaded files will be recorded as being used for this dataset.
105
+ download_config (`DownloadConfig`):
106
+ to specify the cache directory and other
107
+ download options
108
+ base_path (`str`):
109
+ base path that is used when relative paths are used to
110
+ download files. This can be a remote url.
111
+ record_checksums (`bool`, defaults to `True`):
112
+ Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
113
+ """
114
+ self._dataset_name = dataset_name
115
+ self._data_dir = data_dir
116
+ self._base_path = base_path or os.path.abspath(".")
117
+ # To record what is being used: {url: {num_bytes: int, checksum: str}}
118
+ self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
119
+ self.record_checksums = record_checksums
120
+ self.download_config = download_config or DownloadConfig()
121
+ self.downloaded_paths = {}
122
+ self.extracted_paths = {}
123
+
124
+ @property
125
+ def manual_dir(self):
126
+ return self._data_dir
127
+
128
+ @property
129
+ def downloaded_size(self):
130
+ """Returns the total size of downloaded files."""
131
+ return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
132
+
133
+ @staticmethod
134
+ def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
135
+ """Ship the files using Beam FileSystems to the pipeline temp dir.
136
+
137
+ Args:
138
+ downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
139
+ Nested structure containing the
140
+ downloaded path(s).
141
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
142
+ Apache Beam Pipeline.
143
+
144
+ Returns:
145
+ `str` or `list[str]` or `dict[str, str]`
146
+ """
147
+ from ..utils.beam_utils import upload_local_to_remote
148
+
149
+ remote_dir = pipeline._options.get_all_options().get("temp_location")
150
+ if remote_dir is None:
151
+ raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
152
+
153
+ def upload(local_file_path):
154
+ remote_file_path = posixpath.join(
155
+ remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
156
+ )
157
+ logger.info(
158
+ f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
159
+ )
160
+ upload_local_to_remote(local_file_path, remote_file_path)
161
+ return remote_file_path
162
+
163
+ uploaded_path_or_paths = map_nested(
164
+ lambda local_file_path: upload(local_file_path),
165
+ downloaded_path_or_paths,
166
+ )
167
+ return uploaded_path_or_paths
168
+
169
+ def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
170
+ """Record size/checksum of downloaded files."""
171
+ delay = 5
172
+ for url, path in hf_tqdm(
173
+ list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
174
+ delay=delay,
175
+ desc="Computing checksums",
176
+ ):
177
+ # call str to support PathLike objects
178
+ self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
179
+ path, record_checksum=self.record_checksums
180
+ )
181
+
182
+ @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
183
+ def download_custom(self, url_or_urls, custom_download):
184
+ """
185
+ Download given URL(s) by calling `custom_download`.
186
+
187
+ Args:
188
+ url_or_urls (`str` or `list` or `dict`):
189
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
190
+ custom_download (`Callable[src_url, dst_path]`):
191
+ The source URL and destination path. For example
192
+ `tf.io.gfile.copy`, that lets you download from Google storage.
193
+
194
+ Returns:
195
+ downloaded_path(s): `str`, The downloaded paths matching the given input
196
+ `url_or_urls`.
197
+
198
+ Example:
199
+
200
+ ```py
201
+ >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
202
+ ```
203
+ """
204
+ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
205
+ max_retries = self.download_config.max_retries
206
+
207
+ def url_to_downloaded_path(url):
208
+ return os.path.join(cache_dir, hash_url_to_filename(url))
209
+
210
+ downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
211
+ url_or_urls = NestedDataStructure(url_or_urls)
212
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
213
+ for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
214
+ try:
215
+ get_from_cache(
216
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
217
+ )
218
+ cached = True
219
+ except FileNotFoundError:
220
+ cached = False
221
+ if not cached or self.download_config.force_download:
222
+ custom_download(url, path)
223
+ get_from_cache(
224
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
225
+ )
226
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
227
+ return downloaded_path_or_paths.data
228
+
229
+ def download(self, url_or_urls):
230
+ """Download given URL(s).
231
+
232
+ By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
233
+
234
+ Args:
235
+ url_or_urls (`str` or `list` or `dict`):
236
+ URL or `list` or `dict` of URLs to download. Each URL is a `str`.
237
+
238
+ Returns:
239
+ `str` or `list` or `dict`:
240
+ The downloaded paths matching the given input `url_or_urls`.
241
+
242
+ Example:
243
+
244
+ ```py
245
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
246
+ ```
247
+ """
248
+ download_config = self.download_config.copy()
249
+ download_config.extract_compressed_file = False
250
+ if download_config.download_desc is None:
251
+ download_config.download_desc = "Downloading data"
252
+
253
+ download_func = partial(self._download_batched, download_config=download_config)
254
+
255
+ start_time = datetime.now()
256
+ with stack_multiprocessing_download_progress_bars():
257
+ downloaded_path_or_paths = map_nested(
258
+ download_func,
259
+ url_or_urls,
260
+ map_tuple=True,
261
+ num_proc=download_config.num_proc,
262
+ desc="Downloading data files",
263
+ batched=True,
264
+ batch_size=-1,
265
+ )
266
+ duration = datetime.now() - start_time
267
+ logger.info(f"Downloading took {duration.total_seconds() // 60} min")
268
+ url_or_urls = NestedDataStructure(url_or_urls)
269
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
270
+ self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
271
+
272
+ start_time = datetime.now()
273
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
274
+ duration = datetime.now() - start_time
275
+ logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
276
+
277
+ return downloaded_path_or_paths.data
278
+
279
+ def _download_batched(
280
+ self,
281
+ url_or_filenames: List[str],
282
+ download_config: DownloadConfig,
283
+ ) -> List[str]:
284
+ if len(url_or_filenames) >= 16:
285
+ download_config = download_config.copy()
286
+ download_config.disable_tqdm = True
287
+ download_func = partial(self._download_single, download_config=download_config)
288
+
289
+ fs: fsspec.AbstractFileSystem
290
+ fs, path = url_to_fs(url_or_filenames[0], **download_config.storage_options)
291
+ size = 0
292
+ try:
293
+ size = fs.info(path).get("size", 0)
294
+ except Exception:
295
+ pass
296
+ max_workers = (
297
+ config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1
298
+ ) # enable multithreading if files are small
299
+
300
+ return thread_map(
301
+ download_func,
302
+ url_or_filenames,
303
+ desc=download_config.download_desc or "Downloading",
304
+ unit="files",
305
+ position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
306
+ if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
307
+ and multiprocessing.current_process()._identity
308
+ else None,
309
+ max_workers=max_workers,
310
+ tqdm_class=tqdm,
311
+ )
312
+ else:
313
+ return [
314
+ self._download_single(url_or_filename, download_config=download_config)
315
+ for url_or_filename in url_or_filenames
316
+ ]
317
+
318
+ def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str:
319
+ url_or_filename = str(url_or_filename)
320
+ if is_relative_path(url_or_filename):
321
+ # append the relative path to the base_path
322
+ url_or_filename = url_or_path_join(self._base_path, url_or_filename)
323
+ out = cached_path(url_or_filename, download_config=download_config)
324
+ out = tracked_str(out)
325
+ out.set_origin(url_or_filename)
326
+ return out
327
+
328
+ def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
329
+ """Iterate over files within an archive.
330
+
331
+ Args:
332
+ path_or_buf (`str` or `io.BufferedReader`):
333
+ Archive path or archive binary file object.
334
+
335
+ Yields:
336
+ `tuple[str, io.BufferedReader]`:
337
+ 2-tuple (path_within_archive, file_object).
338
+ File object is opened in binary mode.
339
+
340
+ Example:
341
+
342
+ ```py
343
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
344
+ >>> files = dl_manager.iter_archive(archive)
345
+ ```
346
+ """
347
+
348
+ if hasattr(path_or_buf, "read"):
349
+ return ArchiveIterable.from_buf(path_or_buf)
350
+ else:
351
+ return ArchiveIterable.from_urlpath(path_or_buf)
352
+
353
+ def iter_files(self, paths: Union[str, List[str]]):
354
+ """Iterate over file paths.
355
+
356
+ Args:
357
+ paths (`str` or `list` of `str`):
358
+ Root paths.
359
+
360
+ Yields:
361
+ `str`: File path.
362
+
363
+ Example:
364
+
365
+ ```py
366
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
367
+ >>> files = dl_manager.iter_files(files)
368
+ ```
369
+ """
370
+ return FilesIterable.from_urlpaths(paths)
371
+
372
+ def extract(self, path_or_paths, num_proc="deprecated"):
373
+ """Extract given path(s).
374
+
375
+ Args:
376
+ path_or_paths (path or `list` or `dict`):
377
+ Path of file to extract. Each path is a `str`.
378
+ num_proc (`int`):
379
+ Use multi-processing if `num_proc` > 1 and the length of
380
+ `path_or_paths` is larger than `num_proc`.
381
+
382
+ <Deprecated version="2.6.2">
383
+
384
+ Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.
385
+
386
+ </Deprecated>
387
+
388
+ Returns:
389
+ extracted_path(s): `str`, The extracted paths matching the given input
390
+ path_or_paths.
391
+
392
+ Example:
393
+
394
+ ```py
395
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
396
+ >>> extracted_files = dl_manager.extract(downloaded_files)
397
+ ```
398
+ """
399
+ if num_proc != "deprecated":
400
+ warnings.warn(
401
+ "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=<num_proc>)` to the initializer instead.",
402
+ FutureWarning,
403
+ )
404
+ download_config = self.download_config.copy()
405
+ download_config.extract_compressed_file = True
406
+ extract_func = partial(self._download_single, download_config=download_config)
407
+ extracted_paths = map_nested(
408
+ extract_func,
409
+ path_or_paths,
410
+ num_proc=download_config.num_proc,
411
+ desc="Extracting data files",
412
+ )
413
+ path_or_paths = NestedDataStructure(path_or_paths)
414
+ extracted_paths = NestedDataStructure(extracted_paths)
415
+ self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
416
+ return extracted_paths.data
417
+
418
+ def download_and_extract(self, url_or_urls):
419
+ """Download and extract given `url_or_urls`.
420
+
421
+ Is roughly equivalent to:
422
+
423
+ ```
424
+ extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
425
+ ```
426
+
427
+ Args:
428
+ url_or_urls (`str` or `list` or `dict`):
429
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
430
+
431
+ Returns:
432
+ extracted_path(s): `str`, extracted paths of given URL(s).
433
+ """
434
+ return self.extract(self.download(url_or_urls))
435
+
436
+ def get_recorded_sizes_checksums(self):
437
+ return self._recorded_sizes_checksums.copy()
438
+
439
+ def delete_extracted_files(self):
440
+ paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
441
+ for key, path in list(self.extracted_paths.items()):
442
+ if path in paths_to_delete and os.path.isfile(path):
443
+ os.remove(path)
444
+ del self.extracted_paths[key]
445
+
446
+ def manage_extracted_files(self):
447
+ if self.download_config.delete_extracted:
448
+ self.delete_extracted_files()
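The `download`/`iter_archive` pair above lets a dataset script stream members out of a TAR or ZIP archive without extracting it to disk. A minimal sketch of that pattern, assuming a hypothetical archive URL and a hypothetical ".txt" layout:

```py
# Illustrative sketch only: the URL and the ".txt" filter are hypothetical placeholders.
def iter_text_examples(dl_manager):
    # download() returns a locally cached path; iter_archive() yields (path_within_archive, binary file object)
    archive_path = dl_manager.download("https://example.com/corpus.tar.gz")
    for inner_path, file_obj in dl_manager.iter_archive(archive_path):
        if inner_path.endswith(".txt"):
            yield {"file": inner_path, "text": file_obj.read().decode("utf-8")}
```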
llmeval-env/lib/python3.10/site-packages/datasets/download/mock_download_manager.py ADDED
@@ -0,0 +1,244 @@
1
+ # Copyright 2020 The TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Mock download manager interface."""
17
+
18
+ import os
19
+ import re
20
+ import urllib.parse
21
+ from pathlib import Path
22
+ from typing import Callable, List, Optional, Union
23
+ from zipfile import ZipFile
24
+
25
+ from ..utils.file_utils import cached_path, hf_github_url
26
+ from ..utils.logging import get_logger
27
+ from ..utils.version import Version
28
+
29
+
30
+ logger = get_logger(__name__)
31
+
32
+
33
+ class MockDownloadManager:
34
+ dummy_file_name = "dummy_data"
35
+ datasets_scripts_dir = "datasets"
36
+ is_streaming = False
37
+
38
+ def __init__(
39
+ self,
40
+ dataset_name: str,
41
+ config: str,
42
+ version: Union[Version, str],
43
+ cache_dir: Optional[str] = None,
44
+ use_local_dummy_data: bool = False,
45
+ load_existing_dummy_data: bool = True,
46
+ download_callbacks: Optional[List[Callable]] = None,
47
+ ):
48
+ self.downloaded_size = 0
49
+ self.dataset_name = dataset_name
50
+ self.cache_dir = cache_dir
51
+ self.use_local_dummy_data = use_local_dummy_data
52
+ self.config = config
53
+ # download_callbacks take a single url as input
54
+ self.download_callbacks: List[Callable] = download_callbacks or []
55
+ # if False, it doesn't load existing files and it returns the paths of the dummy files relative
56
+ # to the dummy_data zip file root
57
+ self.load_existing_dummy_data = load_existing_dummy_data
58
+
59
+ # TODO(PVP, QL) might need to make this more general
60
+ self.version_name = str(version)
61
+ # to be downloaded
62
+ self._dummy_file = None
63
+ self._bucket_url = None
64
+
65
+ @property
66
+ def dummy_file(self):
67
+ if self._dummy_file is None:
68
+ self._dummy_file = self.download_dummy_data()
69
+ return self._dummy_file
70
+
71
+ @property
72
+ def dummy_data_folder(self):
73
+ if self.config is not None:
74
+ # structure is dummy / config_name / version_name
75
+ return os.path.join("dummy", self.config.name, self.version_name)
76
+ # structure is dummy / version_name
77
+ return os.path.join("dummy", self.version_name)
78
+
79
+ @property
80
+ def dummy_zip_file(self):
81
+ return os.path.join(self.dummy_data_folder, "dummy_data.zip")
82
+
83
+ def download_dummy_data(self):
84
+ path_to_dummy_data_dir = (
85
+ self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
86
+ )
87
+
88
+ local_path = cached_path(
89
+ path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
90
+ )
91
+
92
+ return os.path.join(local_path, self.dummy_file_name)
93
+
94
+ @property
95
+ def local_path_to_dummy_data(self):
96
+ return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
97
+
98
+ @property
99
+ def github_path_to_dummy_data(self):
100
+ if self._bucket_url is None:
101
+ self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
102
+ return self._bucket_url
103
+
104
+ @property
105
+ def manual_dir(self):
106
+ # return full path if it's a dir
107
+ if os.path.isdir(self.dummy_file):
108
+ return self.dummy_file
109
+ # else cut off path to file -> example `xsum`.
110
+ return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
111
+
112
+ # this function has to be in the manager under this name so that testing works
113
+ def download_and_extract(self, data_url, *args):
114
+ if self.load_existing_dummy_data:
115
+ # dummy data is downloaded and tested
116
+ dummy_file = self.dummy_file
117
+ else:
118
+ # dummy data cannot be downloaded and only the path to dummy file is returned
119
+ dummy_file = self.dummy_file_name
120
+
121
+ # special case when data_url is a dict
122
+ if isinstance(data_url, dict):
123
+ return self.create_dummy_data_dict(dummy_file, data_url)
124
+ elif isinstance(data_url, (list, tuple)):
125
+ return self.create_dummy_data_list(dummy_file, data_url)
126
+ else:
127
+ return self.create_dummy_data_single(dummy_file, data_url)
128
+
129
+ # this function has to be in the manager under this name so that testing works
130
+ def download(self, data_url, *args):
131
+ return self.download_and_extract(data_url)
132
+
133
+ # this function has to be in the manager under this name so that testing works
134
+ def download_custom(self, data_url, custom_download):
135
+ return self.download_and_extract(data_url)
136
+
137
+ # this function has to be in the manager under this name so that testing works
138
+ def extract(self, path, *args, **kwargs):
139
+ return path
140
+
141
+ # this function has to be in the manager under this name so that testing works
142
+ def get_recorded_sizes_checksums(self):
143
+ return {}
144
+
145
+ def create_dummy_data_dict(self, path_to_dummy_data, data_url):
146
+ dummy_data_dict = {}
147
+ for key, single_urls in data_url.items():
148
+ for download_callback in self.download_callbacks:
149
+ if isinstance(single_urls, list):
150
+ for single_url in single_urls:
151
+ download_callback(single_url)
152
+ else:
153
+ single_url = single_urls
154
+ download_callback(single_url)
155
+ # we force the name of each key to be the last file / folder name of the url path
156
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
157
+ if isinstance(single_urls, list):
158
+ value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
159
+ else:
160
+ single_url = single_urls
161
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
162
+ dummy_data_dict[key] = value
163
+
164
+ # make sure that values are unique
165
+ if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
166
+ dummy_data_dict.values()
167
+ ):
168
+ # append key to value to make its name unique
169
+ dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
170
+
171
+ return dummy_data_dict
172
+
173
+ def create_dummy_data_list(self, path_to_dummy_data, data_url):
174
+ dummy_data_list = []
175
+ # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
176
+ is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
177
+ is_pubmed_records = all(
178
+ url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
179
+ )
180
+ if data_url and (is_tf_records or is_pubmed_records):
181
+ data_url = [data_url[0]] * len(data_url)
182
+ for single_url in data_url:
183
+ for download_callback in self.download_callbacks:
184
+ download_callback(single_url)
185
+ # we force the name of each key to be the last file / folder name of the url path
186
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
187
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
188
+ dummy_data_list.append(value)
189
+ return dummy_data_list
190
+
191
+ def create_dummy_data_single(self, path_to_dummy_data, data_url):
192
+ for download_callback in self.download_callbacks:
193
+ download_callback(data_url)
194
+ # we force the name of each key to be the last file / folder name of the url path
195
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
196
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
197
+ if os.path.exists(value) or not self.load_existing_dummy_data:
198
+ return value
199
+ else:
200
+ # Backward compatibility, maybe deprecate at one point.
201
+ # For many datasets with single url calls to dl_manager.download_and_extract,
202
+ # the dummy_data.zip file is actually the zipped downloaded file
203
+ # while now we expected the dummy_data.zip file to be a directory containing
204
+ # the downloaded file.
205
+ return path_to_dummy_data
206
+
207
+ def delete_extracted_files(self):
208
+ pass
209
+
210
+ def manage_extracted_files(self):
211
+ pass
212
+
213
+ def iter_archive(self, path):
214
+ def _iter_archive_members(path):
215
+ # this preserves the order of the members inside the ZIP archive
216
+ dummy_parent_path = Path(self.dummy_file).parent
217
+ relative_path = path.relative_to(dummy_parent_path)
218
+ with ZipFile(self.local_path_to_dummy_data) as zip_file:
219
+ members = zip_file.namelist()
220
+ for member in members:
221
+ if member.startswith(relative_path.as_posix()):
222
+ yield dummy_parent_path.joinpath(member)
223
+
224
+ path = Path(path)
225
+ file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
226
+ for file_path in file_paths:
227
+ if file_path.is_file() and not file_path.name.startswith((".", "__")):
228
+ yield file_path.relative_to(path).as_posix(), file_path.open("rb")
229
+
230
+ def iter_files(self, paths):
231
+ if not isinstance(paths, list):
232
+ paths = [paths]
233
+ for path in paths:
234
+ if os.path.isfile(path):
235
+ yield path
236
+ else:
237
+ for dirpath, dirnames, filenames in os.walk(path):
238
+ if os.path.basename(dirpath).startswith((".", "__")):
239
+ continue
240
+ dirnames.sort()
241
+ for filename in sorted(filenames):
242
+ if filename.startswith((".", "__")):
243
+ continue
244
+ yield os.path.join(dirpath, filename)
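With `load_existing_dummy_data=False`, the mock manager never touches the network and simply rewrites each URL to a path under the dummy-data folder, which makes the mapping easy to inspect. A small sketch (dataset name and URLs are hypothetical):

```py
from datasets.download.mock_download_manager import MockDownloadManager

# Hypothetical dataset name, config and URLs, used only to show the URL -> dummy-path rewriting.
mock_dl = MockDownloadManager(
    dataset_name="my_dataset", config=None, version="1.0.0", load_existing_dummy_data=False
)
urls = {"train": "https://example.com/train.csv?dl=1", "test": "https://example.com/test.csv?dl=1"}
print(mock_dl.download_and_extract(urls))
# roughly: {'train': 'dummy_data/train.csv%3Fdl%3D1', 'test': 'dummy_data/test.csv%3Fdl%3D1'}
```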
llmeval-env/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py ADDED
@@ -0,0 +1,210 @@
1
+ import io
2
+ import os
3
+ from typing import Iterable, List, Optional, Tuple, Union
4
+
5
+ from ..utils.file_utils import ( # noqa: F401 # backward compatibility
6
+ SINGLE_FILE_COMPRESSION_PROTOCOLS,
7
+ ArchiveIterable,
8
+ FilesIterable,
9
+ _get_extraction_protocol,
10
+ _get_path_extension,
11
+ _prepare_path_and_storage_options,
12
+ is_relative_path,
13
+ url_or_path_join,
14
+ xbasename,
15
+ xdirname,
16
+ xet_parse,
17
+ xexists,
18
+ xgetsize,
19
+ xglob,
20
+ xgzip_open,
21
+ xisdir,
22
+ xisfile,
23
+ xjoin,
24
+ xlistdir,
25
+ xnumpy_load,
26
+ xopen,
27
+ xpandas_read_csv,
28
+ xpandas_read_excel,
29
+ xPath,
30
+ xpyarrow_parquet_read_table,
31
+ xrelpath,
32
+ xsio_loadmat,
33
+ xsplit,
34
+ xsplitext,
35
+ xwalk,
36
+ xxml_dom_minidom_parse,
37
+ )
38
+ from ..utils.logging import get_logger
39
+ from ..utils.py_utils import map_nested
40
+ from .download_config import DownloadConfig
41
+
42
+
43
+ logger = get_logger(__name__)
44
+
45
+
46
+ class StreamingDownloadManager:
47
+ """
48
+ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
49
+ Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
50
+ data; rather, they return the path or URL that can be opened with the `xopen` function, which extends the
51
+ built-in `open` function to stream data from remote files.
52
+ """
53
+
54
+ is_streaming = True
55
+
56
+ def __init__(
57
+ self,
58
+ dataset_name: Optional[str] = None,
59
+ data_dir: Optional[str] = None,
60
+ download_config: Optional[DownloadConfig] = None,
61
+ base_path: Optional[str] = None,
62
+ ):
63
+ self._dataset_name = dataset_name
64
+ self._data_dir = data_dir
65
+ self._base_path = base_path or os.path.abspath(".")
66
+ self.download_config = download_config or DownloadConfig()
67
+
68
+ @property
69
+ def manual_dir(self):
70
+ return self._data_dir
71
+
72
+ def download(self, url_or_urls):
73
+ """Normalize URL(s) of files to stream data from.
74
+ This is the lazy version of `DownloadManager.download` for streaming.
75
+
76
+ Args:
77
+ url_or_urls (`str` or `list` or `dict`):
78
+ URL(s) of files to stream data from. Each url is a `str`.
79
+
80
+ Returns:
81
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
82
+
83
+ Example:
84
+
85
+ ```py
86
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
87
+ ```
88
+ """
89
+ url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
90
+ return url_or_urls
91
+
92
+ def _download_single(self, urlpath: str) -> str:
93
+ urlpath = str(urlpath)
94
+ if is_relative_path(urlpath):
95
+ # append the relative path to the base_path
96
+ urlpath = url_or_path_join(self._base_path, urlpath)
97
+ return urlpath
98
+
99
+ def extract(self, url_or_urls):
100
+ """Add extraction protocol for given url(s) for streaming.
101
+
102
+ This is the lazy version of `DownloadManager.extract` for streaming.
103
+
104
+ Args:
105
+ url_or_urls (`str` or `list` or `dict`):
106
+ URL(s) of files to stream data from. Each url is a `str`.
107
+
108
+ Returns:
109
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
110
+
111
+ Example:
112
+
113
+ ```py
114
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
115
+ >>> extracted_files = dl_manager.extract(downloaded_files)
116
+ ```
117
+ """
118
+ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
119
+ return urlpaths
120
+
121
+ def _extract(self, urlpath: str) -> str:
122
+ urlpath = str(urlpath)
123
+ protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
124
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
125
+ path = urlpath.split("::")[0]
126
+ extension = _get_path_extension(path)
127
+ if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
128
+ raise NotImplementedError(
129
+ f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
130
+ f"Please use `dl_manager.iter_archive` instead.\n\n"
131
+ f"Example usage:\n\n"
132
+ f"\turl = dl_manager.download(url)\n"
133
+ f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
134
+ f"\tfor filename, file in tar_archive_iterator:\n"
135
+ f"\t\t..."
136
+ )
137
+ if protocol is None:
138
+ # no extraction
139
+ return urlpath
140
+ elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
141
+ # there is one single file which is the uncompressed file
142
+ inner_file = os.path.basename(urlpath.split("::")[0])
143
+ inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
144
+ return f"{protocol}://{inner_file}::{urlpath}"
145
+ else:
146
+ return f"{protocol}://::{urlpath}"
147
+
148
+ def download_and_extract(self, url_or_urls):
149
+ """Prepare given `url_or_urls` for streaming (add extraction protocol).
150
+
151
+ This is the lazy version of `DownloadManager.download_and_extract` for streaming.
152
+
153
+ Is equivalent to:
154
+
155
+ ```
156
+ urls = dl_manager.extract(dl_manager.download(url_or_urls))
157
+ ```
158
+
159
+ Args:
160
+ url_or_urls (`str` or `list` or `dict`):
161
+ URL(s) to stream data from. Each url is a `str`.
162
+
163
+ Returns:
164
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
165
+ """
166
+ return self.extract(self.download(url_or_urls))
167
+
168
+ def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
169
+ """Iterate over files within an archive.
170
+
171
+ Args:
172
+ urlpath_or_buf (`str` or `io.BufferedReader`):
173
+ Archive path or archive binary file object.
174
+
175
+ Yields:
176
+ `tuple[str, io.BufferedReader]`:
177
+ 2-tuple (path_within_archive, file_object).
178
+ File object is opened in binary mode.
179
+
180
+ Example:
181
+
182
+ ```py
183
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
184
+ >>> files = dl_manager.iter_archive(archive)
185
+ ```
186
+ """
187
+
188
+ if hasattr(urlpath_or_buf, "read"):
189
+ return ArchiveIterable.from_buf(urlpath_or_buf)
190
+ else:
191
+ return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
192
+
193
+ def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
194
+ """Iterate over files.
195
+
196
+ Args:
197
+ urlpaths (`str` or `list` of `str`):
198
+ Root paths.
199
+
200
+ Yields:
201
+ str: File URL path.
202
+
203
+ Example:
204
+
205
+ ```py
206
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
207
+ >>> files = dl_manager.iter_files(files)
208
+ ```
209
+ """
210
+ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
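Because `download` and `extract` are lazy here, `download_and_extract` just rewrites a URL into an fsspec chained URL that `xopen` can stream later. A sketch with a hypothetical URL:

```py
from datasets.download.streaming_download_manager import StreamingDownloadManager, xopen

dl_manager = StreamingDownloadManager()
# Hypothetical URL; nothing is downloaded at this point.
url = dl_manager.download_and_extract("https://example.com/data.json.gz")
print(url)  # expected: "gzip://data.json::https://example.com/data.json.gz"

# Only when the chained URL is opened does streaming (and on-the-fly decompression) happen:
# with xopen(url, "r") as f:
#     first_record = f.readline()
```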
llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ import importlib
2
+ import shutil
3
+ import warnings
4
+ from typing import List
5
+
6
+ import fsspec
7
+ import fsspec.asyn
8
+ from fsspec.implementations.local import LocalFileSystem
9
+
10
+ from ..utils.deprecation_utils import deprecated
11
+ from . import compression
12
+
13
+
14
+ _has_s3fs = importlib.util.find_spec("s3fs") is not None
15
+
16
+ if _has_s3fs:
17
+ from .s3filesystem import S3FileSystem # noqa: F401
18
+
19
+ COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
20
+ compression.Bz2FileSystem,
21
+ compression.GzipFileSystem,
22
+ compression.Lz4FileSystem,
23
+ compression.XzFileSystem,
24
+ compression.ZstdFileSystem,
25
+ ]
26
+
27
+ # Register custom filesystems
28
+ for fs_class in COMPRESSION_FILESYSTEMS:
29
+ if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
30
+ warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
31
+ fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
32
+
33
+
34
+ @deprecated(
35
+ "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
36
+ )
37
+ def extract_path_from_uri(dataset_path: str) -> str:
38
+ """
39
+ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
40
+
41
+ Args:
42
+ dataset_path (`str`):
43
+ Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
44
+ """
45
+ if "://" in dataset_path:
46
+ dataset_path = dataset_path.split("://")[1]
47
+ return dataset_path
48
+
49
+
50
+ def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
51
+ """
52
+ Checks if `fs` is a remote filesystem.
53
+
54
+ Args:
55
+ fs (`fsspec.spec.AbstractFileSystem`):
56
+ An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
57
+ """
58
+ return not isinstance(fs, LocalFileSystem)
59
+
60
+
61
+ def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
62
+ """
63
+ Renames the file `src` in `fs` to `dst`.
64
+ """
65
+ if not is_remote_filesystem(fs):
66
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
67
+ shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
68
+ else:
69
+ fs.mv(src, dst, recursive=True)
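A quick usage sketch for the helper above; `is_remote_filesystem` simply treats anything that is not a `LocalFileSystem` as remote:

```py
import fsspec

from datasets.filesystems import is_remote_filesystem

print(is_remote_filesystem(fsspec.filesystem("file")))    # False: LocalFileSystem is considered local
print(is_remote_filesystem(fsspec.filesystem("memory")))  # True: any non-local filesystem counts as remote
```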
llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.34 kB).

llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc ADDED
Binary file (4.24 kB).

llmeval-env/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc ADDED
Binary file (6.07 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/filesystems/compression.py ADDED
@@ -0,0 +1,123 @@
1
+ import os
2
+ from typing import Optional
3
+
4
+ import fsspec
5
+ from fsspec.archive import AbstractArchiveFileSystem
6
+
7
+
8
+ class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
9
+ """Read contents of compressed file as a filesystem with one file inside."""
10
+
11
+ root_marker = ""
12
+ protocol: str = (
13
+ None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
14
+ )
15
+ compression: str = None # compression type in fsspec. ex: "gzip"
16
+ extension: str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
17
+
18
+ def __init__(
19
+ self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
20
+ ):
21
+ """
22
+ The compressed file system can be instantiated from any compressed file.
23
+ It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
24
+
25
+ The single file inside the filesystem is named after the compressed file,
26
+ without the compression extension at the end of the filename.
27
+
28
+ Args:
29
+ fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
30
+ mode (:obj:``str``): Currently, only 'rb' accepted
31
+ target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
32
+ target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
33
+ """
34
+ super().__init__(self, **kwargs)
35
+ # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
36
+ self.file = fsspec.open(
37
+ fo,
38
+ mode="rb",
39
+ protocol=target_protocol,
40
+ compression=self.compression,
41
+ client_kwargs={
42
+ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
43
+ "trust_env": True, # Enable reading proxy env variables.
44
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
45
+ },
46
+ **(target_options or {}),
47
+ )
48
+ self.compressed_name = os.path.basename(self.file.path.split("::")[0])
49
+ self.uncompressed_name = (
50
+ self.compressed_name[: self.compressed_name.rindex(".")]
51
+ if "." in self.compressed_name
52
+ else self.compressed_name
53
+ )
54
+ self.dir_cache = None
55
+
56
+ @classmethod
57
+ def _strip_protocol(cls, path):
58
+ # compressed file paths are always relative to the archive root
59
+ return super()._strip_protocol(path).lstrip("/")
60
+
61
+ def _get_dirs(self):
62
+ if self.dir_cache is None:
63
+ f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
64
+ self.dir_cache = {f["name"]: f}
65
+
66
+ def cat(self, path: str):
67
+ return self.file.open().read()
68
+
69
+ def _open(
70
+ self,
71
+ path: str,
72
+ mode: str = "rb",
73
+ block_size=None,
74
+ autocommit=True,
75
+ cache_options=None,
76
+ **kwargs,
77
+ ):
78
+ path = self._strip_protocol(path)
79
+ if mode != "rb":
80
+ raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
81
+ return self.file.open()
82
+
83
+
84
+ class Bz2FileSystem(BaseCompressedFileFileSystem):
85
+ """Read contents of BZ2 file as a filesystem with one file inside."""
86
+
87
+ protocol = "bz2"
88
+ compression = "bz2"
89
+ extension = ".bz2"
90
+
91
+
92
+ class GzipFileSystem(BaseCompressedFileFileSystem):
93
+ """Read contents of GZIP file as a filesystem with one file inside."""
94
+
95
+ protocol = "gzip"
96
+ compression = "gzip"
97
+ extension = ".gz"
98
+
99
+
100
+ class Lz4FileSystem(BaseCompressedFileFileSystem):
101
+ """Read contents of LZ4 file as a filesystem with one file inside."""
102
+
103
+ protocol = "lz4"
104
+ compression = "lz4"
105
+ extension = ".lz4"
106
+
107
+
108
+ class XzFileSystem(BaseCompressedFileFileSystem):
109
+ """Read contents of .xz (LZMA) file as a filesystem with one file inside."""
110
+
111
+ protocol = "xz"
112
+ compression = "xz"
113
+ extension = ".xz"
114
+
115
+
116
+ class ZstdFileSystem(BaseCompressedFileFileSystem):
117
+ """
118
+ Read contents of .zstd file as a filesystem with one file inside.
119
+ """
120
+
121
+ protocol = "zstd"
122
+ compression = "zstd"
123
+ extension = ".zst"
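These filesystems are what make chained URLs such as `gzip://file.txt::https://foo.bar/file.txt.gz` resolvable once they are registered with fsspec (the registration loop in `filesystems/__init__.py` above runs on import). A rough local sketch, under the assumption that importing `datasets` has performed that registration:

```py
import gzip

import fsspec

import datasets  # noqa: F401  # importing datasets registers the compression filesystems with fsspec

# Create a small gzip file to read back; the file name is arbitrary.
with gzip.open("example.txt.gz", "wt") as f:
    f.write("hello from a gzip member\n")

# The part before "::" names the single inner file; the part after is the compressed source.
with fsspec.open("gzip://example.txt::example.txt.gz", "rt") as f:
    print(f.read())
```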
llmeval-env/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py ADDED
@@ -0,0 +1,116 @@
1
+ import s3fs
2
+
3
+ from ..utils.deprecation_utils import deprecated
4
+
5
+
6
+ @deprecated("Use s3fs.S3FileSystem instead.")
7
+ class S3FileSystem(s3fs.S3FileSystem):
8
+ """
9
+ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
10
+
11
+ Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
12
+
13
+ Args:
14
+ anon (`bool`, defaults to `False`):
15
+ Whether to use an anonymous connection (public buckets only). If `False`, uses the key/secret given,
16
+ or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
17
+ key (`str`):
18
+ If not anonymous, use this access key ID, if specified.
19
+ secret (`str`):
20
+ If not anonymous, use this secret access key, if specified.
21
+ token (`str`):
22
+ If not anonymous, use this security token, if specified.
23
+ use_ssl (`bool`, defaults to `True`):
24
+ Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
25
+ also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
26
+ s3_additional_kwargs (`dict`):
27
+ Parameters that are used when calling S3 API methods. Typically used for things
28
+ like ServerSideEncryption.
29
+ client_kwargs (`dict`):
30
+ Parameters for the botocore client.
31
+ requester_pays (`bool`, defaults to `False`):
32
+ Whether `RequesterPays` buckets are supported.
33
+ default_block_size (`int`):
34
+ If given, the default block size value used for `open()`, if no specific value is given at all time.
35
+ The built-in default is 5MB.
36
+ default_fill_cache (`bool`, defaults to `True`):
37
+ Whether to use cache filling with open by default. Refer to `S3File.open`.
38
+ default_cache_type (`str`, defaults to `bytes`):
39
+ If given, the default `cache_type` value used for `open()`. Set to `none` if no
40
+ caching is desired. See fsspec's documentation for other available `cache_type` values.
41
+ version_aware (`bool`, defaults to `False`):
42
+ Whether to support bucket versioning. If enabled, this will require the user to have
43
+ the necessary IAM permissions for dealing with versioned objects.
44
+ cache_regions (`bool`, defaults to `False`):
45
+ Whether to cache bucket regions. Whenever a new bucket is used, it will
46
+ first find out which region it belongs to and then use the client for that region.
47
+ asynchronous (`bool`, defaults to `False`):
48
+ Whether this instance is to be used from inside coroutines.
49
+ config_kwargs (`dict`):
50
+ Parameters passed to `botocore.client.Config`.
51
+ **kwargs:
52
+ Other parameters for core session.
53
+ session (`aiobotocore.session.AioSession`):
54
+ Session to be used for all connections. This session will be used in place of creating
55
+ a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
56
+ skip_instance_cache (`bool`):
57
+ Control reuse of instances. Passed on to `fsspec`.
58
+ use_listings_cache (`bool`):
59
+ Control reuse of directory listings. Passed on to `fsspec`.
60
+ listings_expiry_time (`int` or `float`):
61
+ Control reuse of directory listings. Passed on to `fsspec`.
62
+ max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
63
+
64
+ Examples:
65
+
66
+ Listing files from public S3 bucket.
67
+
68
+ ```py
69
+ >>> import datasets
70
+ >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
71
+ >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
72
+ ['dataset_info.json.json','dataset.arrow','state.json']
73
+ ```
74
+
75
+ Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
76
+
77
+ ```py
78
+ >>> import datasets
79
+ >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
80
+ >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
81
+ ['dataset_info.json.json','dataset.arrow','state.json']
82
+ ```
83
+
84
+ Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
85
+
86
+ ```py
87
+ >>> import botocore
88
+ >>> from datasets.filesystems import S3FileSystem
89
+
90
+ >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
91
+ >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
92
+ ```
93
+
94
+ Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
95
+
96
+ ```py
97
+ >>> from datasets import load_from_disk
98
+ >>> from datasets.filesystems import S3FileSystem
99
+
100
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
101
+ >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
102
+ >>> print(len(dataset))
103
+ 25000
104
+ ```
105
+
106
+ Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
107
+
108
+ ```py
109
+ >>> from datasets import load_dataset
110
+ >>> from datasets.filesystems import S3FileSystem
111
+
112
+ >>> dataset = load_dataset("imdb")
113
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
114
+ >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
115
+ ```
116
+ """
llmeval-env/lib/python3.10/site-packages/datasets/io/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc ADDED
Binary file (2.13 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc ADDED
Binary file (4.49 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc ADDED
Binary file (1.65 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc ADDED
Binary file (5.06 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc ADDED
Binary file (5.36 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc ADDED
Binary file (1.93 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc ADDED
Binary file (3.95 kB).

llmeval-env/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc ADDED
Binary file (1.74 kB).
 
llmeval-env/lib/python3.10/site-packages/datasets/io/abc.py ADDED
@@ -0,0 +1,53 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Optional, Union
3
+
4
+ from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
5
+ from ..utils.typing import NestedDataStructureLike, PathLike
6
+
7
+
8
+ class AbstractDatasetReader(ABC):
9
+ def __init__(
10
+ self,
11
+ path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
12
+ split: Optional[NamedSplit] = None,
13
+ features: Optional[Features] = None,
14
+ cache_dir: str = None,
15
+ keep_in_memory: bool = False,
16
+ streaming: bool = False,
17
+ num_proc: Optional[int] = None,
18
+ **kwargs,
19
+ ):
20
+ self.path_or_paths = path_or_paths
21
+ self.split = split if split or isinstance(path_or_paths, dict) else "train"
22
+ self.features = features
23
+ self.cache_dir = cache_dir
24
+ self.keep_in_memory = keep_in_memory
25
+ self.streaming = streaming
26
+ self.num_proc = num_proc
27
+ self.kwargs = kwargs
28
+
29
+ @abstractmethod
30
+ def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
31
+ pass
32
+
33
+
34
+ class AbstractDatasetInputStream(ABC):
35
+ def __init__(
36
+ self,
37
+ features: Optional[Features] = None,
38
+ cache_dir: str = None,
39
+ keep_in_memory: bool = False,
40
+ streaming: bool = False,
41
+ num_proc: Optional[int] = None,
42
+ **kwargs,
43
+ ):
44
+ self.features = features
45
+ self.cache_dir = cache_dir
46
+ self.keep_in_memory = keep_in_memory
47
+ self.streaming = streaming
48
+ self.num_proc = num_proc
49
+ self.kwargs = kwargs
50
+
51
+ @abstractmethod
52
+ def read(self) -> Union[Dataset, IterableDataset]:
53
+ pass
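The two ABCs above only fix a constructor signature and a `read()` contract; the concrete readers (CSV, JSON, Parquet, ...) follow below. A hypothetical toy subclass, just to show the shape of that contract:

```py
from datasets import Dataset
from datasets.io.abc import AbstractDatasetInputStream


class ListDatasetInputStream(AbstractDatasetInputStream):
    """Toy input stream that materializes an in-memory list of rows (illustrative only)."""

    def __init__(self, rows, features=None, **kwargs):
        super().__init__(features=features, **kwargs)
        self.rows = rows

    def read(self) -> Dataset:
        # A real implementation would delegate to a dataset builder instead.
        return Dataset.from_list(self.rows, features=self.features)


ds = ListDatasetInputStream([{"x": 1}, {"x": 2}]).read()
print(len(ds))  # 2
```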
llmeval-env/lib/python3.10/site-packages/datasets/io/csv.py ADDED
@@ -0,0 +1,145 @@
1
+ import multiprocessing
2
+ import os
3
+ from typing import BinaryIO, Optional, Union
4
+
5
+ import fsspec
6
+
7
+ from .. import Dataset, Features, NamedSplit, config
8
+ from ..formatting import query_table
9
+ from ..packaged_modules.csv.csv import Csv
10
+ from ..utils import tqdm as hf_tqdm
11
+ from ..utils.typing import NestedDataStructureLike, PathLike
12
+ from .abc import AbstractDatasetReader
13
+
14
+
15
+ class CsvDatasetReader(AbstractDatasetReader):
16
+ def __init__(
17
+ self,
18
+ path_or_paths: NestedDataStructureLike[PathLike],
19
+ split: Optional[NamedSplit] = None,
20
+ features: Optional[Features] = None,
21
+ cache_dir: str = None,
22
+ keep_in_memory: bool = False,
23
+ streaming: bool = False,
24
+ num_proc: Optional[int] = None,
25
+ **kwargs,
26
+ ):
27
+ super().__init__(
28
+ path_or_paths,
29
+ split=split,
30
+ features=features,
31
+ cache_dir=cache_dir,
32
+ keep_in_memory=keep_in_memory,
33
+ streaming=streaming,
34
+ num_proc=num_proc,
35
+ **kwargs,
36
+ )
37
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
38
+ self.builder = Csv(
39
+ cache_dir=cache_dir,
40
+ data_files=path_or_paths,
41
+ features=features,
42
+ **kwargs,
43
+ )
44
+
45
+ def read(self):
46
+ # Build iterable dataset
47
+ if self.streaming:
48
+ dataset = self.builder.as_streaming_dataset(split=self.split)
49
+ # Build regular (map-style) dataset
50
+ else:
51
+ download_config = None
52
+ download_mode = None
53
+ verification_mode = None
54
+ base_path = None
55
+
56
+ self.builder.download_and_prepare(
57
+ download_config=download_config,
58
+ download_mode=download_mode,
59
+ verification_mode=verification_mode,
60
+ base_path=base_path,
61
+ num_proc=self.num_proc,
62
+ )
63
+ dataset = self.builder.as_dataset(
64
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
65
+ )
66
+ return dataset
67
+
68
+
69
+ class CsvDatasetWriter:
70
+ def __init__(
71
+ self,
72
+ dataset: Dataset,
73
+ path_or_buf: Union[PathLike, BinaryIO],
74
+ batch_size: Optional[int] = None,
75
+ num_proc: Optional[int] = None,
76
+ storage_options: Optional[dict] = None,
77
+ **to_csv_kwargs,
78
+ ):
79
+ if num_proc is not None and num_proc <= 0:
80
+ raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
81
+
82
+ self.dataset = dataset
83
+ self.path_or_buf = path_or_buf
84
+ self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
85
+ self.num_proc = num_proc
86
+ self.encoding = "utf-8"
87
+ self.storage_options = storage_options or {}
88
+ self.to_csv_kwargs = to_csv_kwargs
89
+
90
+ def write(self) -> int:
91
+ _ = self.to_csv_kwargs.pop("path_or_buf", None)
92
+ header = self.to_csv_kwargs.pop("header", True)
93
+ index = self.to_csv_kwargs.pop("index", False)
94
+
95
+ if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
96
+ with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
97
+ written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
98
+ else:
99
+ written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
100
+ return written
101
+
102
+ def _batch_csv(self, args):
103
+ offset, header, index, to_csv_kwargs = args
104
+
105
+ batch = query_table(
106
+ table=self.dataset.data,
107
+ key=slice(offset, offset + self.batch_size),
108
+ indices=self.dataset._indices,
109
+ )
110
+ csv_str = batch.to_pandas().to_csv(
111
+ path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
112
+ )
113
+ return csv_str.encode(self.encoding)
114
+
115
+ def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
116
+ """Writes the pyarrow table as CSV to a binary file handle.
117
+
118
+ Caller is responsible for opening and closing the handle.
119
+ """
120
+ written = 0
121
+
122
+ if self.num_proc is None or self.num_proc == 1:
123
+ for offset in hf_tqdm(
124
+ range(0, len(self.dataset), self.batch_size),
125
+ unit="ba",
126
+ desc="Creating CSV from Arrow format",
127
+ ):
128
+ csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
129
+ written += file_obj.write(csv_str)
130
+
131
+ else:
132
+ num_rows, batch_size = len(self.dataset), self.batch_size
133
+ with multiprocessing.Pool(self.num_proc) as pool:
134
+ for csv_str in hf_tqdm(
135
+ pool.imap(
136
+ self._batch_csv,
137
+ [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
138
+ ),
139
+ total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
140
+ unit="ba",
141
+ desc="Creating CSV from Arrow format",
142
+ ):
143
+ written += file_obj.write(csv_str)
144
+
145
+ return written
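These classes are the machinery behind `Dataset.from_csv` and `Dataset.to_csv`, but they can also be driven directly. A brief sketch; the file names are hypothetical:

```py
from datasets.io.csv import CsvDatasetReader, CsvDatasetWriter

# Read a single CSV file into a map-style Dataset (the "train" split by default).
dataset = CsvDatasetReader("train.csv", cache_dir="./hf_cache").read()

# Write it back out; write() returns the number of bytes written.
num_bytes = CsvDatasetWriter(dataset, "train_copy.csv", batch_size=1000).write()
```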
llmeval-env/lib/python3.10/site-packages/datasets/io/generator.py ADDED
@@ -0,0 +1,57 @@
1
+ from typing import Callable, Optional
2
+
3
+ from .. import Features
4
+ from ..packaged_modules.generator.generator import Generator
5
+ from .abc import AbstractDatasetInputStream
6
+
7
+
8
+ class GeneratorDatasetInputStream(AbstractDatasetInputStream):
9
+ def __init__(
10
+ self,
11
+ generator: Callable,
12
+ features: Optional[Features] = None,
13
+ cache_dir: str = None,
14
+ keep_in_memory: bool = False,
15
+ streaming: bool = False,
16
+ gen_kwargs: Optional[dict] = None,
17
+ num_proc: Optional[int] = None,
18
+ **kwargs,
19
+ ):
20
+ super().__init__(
21
+ features=features,
22
+ cache_dir=cache_dir,
23
+ keep_in_memory=keep_in_memory,
24
+ streaming=streaming,
25
+ num_proc=num_proc,
26
+ **kwargs,
27
+ )
28
+ self.builder = Generator(
29
+ cache_dir=cache_dir,
30
+ features=features,
31
+ generator=generator,
32
+ gen_kwargs=gen_kwargs,
33
+ **kwargs,
34
+ )
35
+
36
+ def read(self):
37
+ # Build iterable dataset
38
+ if self.streaming:
39
+ dataset = self.builder.as_streaming_dataset(split="train")
40
+ # Build regular (map-style) dataset
41
+ else:
42
+ download_config = None
43
+ download_mode = None
44
+ verification_mode = None
45
+ base_path = None
46
+
47
+ self.builder.download_and_prepare(
48
+ download_config=download_config,
49
+ download_mode=download_mode,
50
+ verification_mode=verification_mode,
51
+ base_path=base_path,
52
+ num_proc=self.num_proc,
53
+ )
54
+ dataset = self.builder.as_dataset(
55
+ split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
56
+ )
57
+ return dataset
llmeval-env/lib/python3.10/site-packages/datasets/io/json.py ADDED
@@ -0,0 +1,170 @@
1
+ import multiprocessing
2
+ import os
3
+ from typing import BinaryIO, Optional, Union
4
+
5
+ import fsspec
6
+
7
+ from .. import Dataset, Features, NamedSplit, config
8
+ from ..formatting import query_table
9
+ from ..packaged_modules.json.json import Json
10
+ from ..utils import tqdm as hf_tqdm
11
+ from ..utils.typing import NestedDataStructureLike, PathLike
12
+ from .abc import AbstractDatasetReader
13
+
14
+
15
+ class JsonDatasetReader(AbstractDatasetReader):
16
+ def __init__(
17
+ self,
18
+ path_or_paths: NestedDataStructureLike[PathLike],
19
+ split: Optional[NamedSplit] = None,
20
+ features: Optional[Features] = None,
21
+ cache_dir: str = None,
22
+ keep_in_memory: bool = False,
23
+ streaming: bool = False,
24
+ field: Optional[str] = None,
25
+ num_proc: Optional[int] = None,
26
+ **kwargs,
27
+ ):
28
+ super().__init__(
29
+ path_or_paths,
30
+ split=split,
31
+ features=features,
32
+ cache_dir=cache_dir,
33
+ keep_in_memory=keep_in_memory,
34
+ streaming=streaming,
35
+ num_proc=num_proc,
36
+ **kwargs,
37
+ )
38
+ self.field = field
39
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
40
+ self.builder = Json(
41
+ cache_dir=cache_dir,
42
+ data_files=path_or_paths,
43
+ features=features,
44
+ field=field,
45
+ **kwargs,
46
+ )
47
+
48
+ def read(self):
49
+ # Build iterable dataset
50
+ if self.streaming:
51
+ dataset = self.builder.as_streaming_dataset(split=self.split)
52
+ # Build regular (map-style) dataset
53
+ else:
54
+ download_config = None
55
+ download_mode = None
56
+ verification_mode = None
57
+ base_path = None
58
+
59
+ self.builder.download_and_prepare(
60
+ download_config=download_config,
61
+ download_mode=download_mode,
62
+ verification_mode=verification_mode,
63
+ # try_from_hf_gcs=try_from_hf_gcs,
64
+ base_path=base_path,
65
+ num_proc=self.num_proc,
66
+ )
67
+ dataset = self.builder.as_dataset(
68
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
69
+ )
70
+ return dataset
71
+
72
+
73
+ class JsonDatasetWriter:
74
+ def __init__(
75
+ self,
76
+ dataset: Dataset,
77
+ path_or_buf: Union[PathLike, BinaryIO],
78
+ batch_size: Optional[int] = None,
79
+ num_proc: Optional[int] = None,
80
+ storage_options: Optional[dict] = None,
81
+ **to_json_kwargs,
82
+ ):
83
+ if num_proc is not None and num_proc <= 0:
84
+ raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
85
+
86
+ self.dataset = dataset
87
+ self.path_or_buf = path_or_buf
88
+ self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
89
+ self.num_proc = num_proc
90
+ self.encoding = "utf-8"
91
+ self.storage_options = storage_options or {}
92
+ self.to_json_kwargs = to_json_kwargs
93
+
94
+ def write(self) -> int:
95
+ _ = self.to_json_kwargs.pop("path_or_buf", None)
96
+ orient = self.to_json_kwargs.pop("orient", "records")
97
+ lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
98
+ if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
99
+ self.to_json_kwargs["index"] = False
100
+
101
+ # Determine the default compression value based on self.path_or_buf type
102
+ default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
103
+ compression = self.to_json_kwargs.pop("compression", default_compression)
104
+
105
+ if compression not in [None, "infer", "gzip", "bz2", "xz"]:
106
+ raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
107
+
108
+ if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
109
+ with fsspec.open(
110
+ self.path_or_buf, "wb", compression=compression, **(self.storage_options or {})
111
+ ) as buffer:
112
+ written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
113
+ else:
114
+ if compression:
115
+ raise NotImplementedError(
116
+ f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
117
+ " was passed. Please provide a local path instead."
118
+ )
119
+ written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
120
+ return written
121
+
122
+ def _batch_json(self, args):
123
+ offset, orient, lines, to_json_kwargs = args
124
+
125
+ batch = query_table(
126
+ table=self.dataset.data,
127
+ key=slice(offset, offset + self.batch_size),
128
+ indices=self.dataset._indices,
129
+ )
130
+ json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
131
+ if not json_str.endswith("\n"):
132
+ json_str += "\n"
133
+ return json_str.encode(self.encoding)
134
+
135
+ def _write(
136
+ self,
137
+ file_obj: BinaryIO,
138
+ orient,
139
+ lines,
140
+ **to_json_kwargs,
141
+ ) -> int:
142
+ """Writes the pyarrow table as JSON lines to a binary file handle.
143
+
144
+ Caller is responsible for opening and closing the handle.
145
+ """
146
+ written = 0
147
+
148
+ if self.num_proc is None or self.num_proc == 1:
149
+ for offset in hf_tqdm(
150
+ range(0, len(self.dataset), self.batch_size),
151
+ unit="ba",
152
+ desc="Creating json from Arrow format",
153
+ ):
154
+ json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
155
+ written += file_obj.write(json_str)
156
+ else:
157
+ num_rows, batch_size = len(self.dataset), self.batch_size
158
+ with multiprocessing.Pool(self.num_proc) as pool:
159
+ for json_str in hf_tqdm(
160
+ pool.imap(
161
+ self._batch_json,
162
+ [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
163
+ ),
164
+ total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
165
+ unit="ba",
166
+ desc="Creating json from Arrow format",
167
+ ):
168
+ written += file_obj.write(json_str)
169
+
170
+ return written
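`JsonDatasetWriter` emits JSON Lines by default (`orient="records"`, `lines=True`) and infers compression from the output path when it is given a path rather than a buffer. A short sketch with a hypothetical output name:

```py
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# ".gz" is inferred as gzip compression because the target is a path, not a buffer.
JsonDatasetWriter(ds, "out.jsonl.gz").write()
```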
llmeval-env/lib/python3.10/site-packages/datasets/io/parquet.py ADDED
1
+ import os
2
+ from typing import BinaryIO, Optional, Union
3
+
4
+ import fsspec
5
+ import numpy as np
6
+ import pyarrow.parquet as pq
7
+
8
+ from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
9
+ from ..features.features import FeatureType, _visit
10
+ from ..formatting import query_table
11
+ from ..packaged_modules import _PACKAGED_DATASETS_MODULES
12
+ from ..packaged_modules.parquet.parquet import Parquet
13
+ from ..utils import tqdm as hf_tqdm
14
+ from ..utils.typing import NestedDataStructureLike, PathLike
15
+ from .abc import AbstractDatasetReader
16
+
17
+
18
+ def get_writer_batch_size(features: Features) -> Optional[int]:
19
+ """
20
+ Get the writer_batch_size that defines the maximum row group size in the parquet files.
21
+ The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
22
+ This allows to optimize random access to parquet file, since accessing 1 row requires
23
+ to read its entire row group.
24
+
25
+ This can be improved to get optimized size for querying/iterating
26
+ but at least it matches the dataset viewer expectations on HF.
27
+
28
+ Args:
29
+ ds_config_info (`datasets.info.DatasetInfo`):
30
+ Dataset info from `datasets`.
31
+ Returns:
32
+ writer_batch_size (`Optional[int]`):
33
+ Writer batch size to pass to a dataset builder.
34
+ If `None`, then it will use the `datasets` default.
35
+ """
36
+
37
+ batch_size = np.inf
38
+
39
+ def set_batch_size(feature: FeatureType) -> None:
40
+ nonlocal batch_size
41
+ if isinstance(feature, Image):
42
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
43
+ elif isinstance(feature, Audio):
44
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
45
+ elif isinstance(feature, Value) and feature.dtype == "binary":
46
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
47
+
48
+ _visit(features, set_batch_size)
49
+
50
+ return None if batch_size is np.inf else batch_size
51
+
52
+
53
+ class ParquetDatasetReader(AbstractDatasetReader):
54
+ def __init__(
55
+ self,
56
+ path_or_paths: NestedDataStructureLike[PathLike],
57
+ split: Optional[NamedSplit] = None,
58
+ features: Optional[Features] = None,
59
+ cache_dir: str = None,
60
+ keep_in_memory: bool = False,
61
+ streaming: bool = False,
62
+ num_proc: Optional[int] = None,
63
+ **kwargs,
64
+ ):
65
+ super().__init__(
66
+ path_or_paths,
67
+ split=split,
68
+ features=features,
69
+ cache_dir=cache_dir,
70
+ keep_in_memory=keep_in_memory,
71
+ streaming=streaming,
72
+ num_proc=num_proc,
73
+ **kwargs,
74
+ )
75
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
76
+ hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
77
+ self.builder = Parquet(
78
+ cache_dir=cache_dir,
79
+ data_files=path_or_paths,
80
+ features=features,
81
+ hash=hash,
82
+ **kwargs,
83
+ )
84
+
85
+ def read(self):
86
+ # Build iterable dataset
87
+ if self.streaming:
88
+ dataset = self.builder.as_streaming_dataset(split=self.split)
89
+ # Build regular (map-style) dataset
90
+ else:
91
+ download_config = None
92
+ download_mode = None
93
+ verification_mode = None
94
+ base_path = None
95
+
96
+ self.builder.download_and_prepare(
97
+ download_config=download_config,
98
+ download_mode=download_mode,
99
+ verification_mode=verification_mode,
100
+ base_path=base_path,
101
+ num_proc=self.num_proc,
102
+ )
103
+ dataset = self.builder.as_dataset(
104
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
105
+ )
106
+ return dataset
107
+
108
+
109
+ class ParquetDatasetWriter:
110
+ def __init__(
111
+ self,
112
+ dataset: Dataset,
113
+ path_or_buf: Union[PathLike, BinaryIO],
114
+ batch_size: Optional[int] = None,
115
+ storage_options: Optional[dict] = None,
116
+ **parquet_writer_kwargs,
117
+ ):
118
+ self.dataset = dataset
119
+ self.path_or_buf = path_or_buf
120
+ self.batch_size = batch_size or get_writer_batch_size(dataset.features)
121
+ self.storage_options = storage_options or {}
122
+ self.parquet_writer_kwargs = parquet_writer_kwargs
123
+
124
+ def write(self) -> int:
125
+ batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
126
+
127
+ if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
128
+ with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
129
+ written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
130
+ else:
131
+ written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
132
+ return written
133
+
134
+ def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
135
+ """Writes the pyarrow table as Parquet to a binary file handle.
136
+
137
+ Caller is responsible for opening and closing the handle.
138
+ """
139
+ written = 0
140
+ _ = parquet_writer_kwargs.pop("path_or_buf", None)
141
+ schema = self.dataset.features.arrow_schema
142
+
143
+ writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
144
+
145
+ for offset in hf_tqdm(
146
+ range(0, len(self.dataset), batch_size),
147
+ unit="ba",
148
+ desc="Creating parquet from Arrow format",
149
+ ):
150
+ batch = query_table(
151
+ table=self.dataset._data,
152
+ key=slice(offset, offset + batch_size),
153
+ indices=self.dataset._indices,
154
+ )
155
+ writer.write_table(batch)
156
+ written += batch.nbytes
157
+ writer.close()
158
+ return written
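As a usage note for the file above: ParquetDatasetWriter backs Dataset.to_parquet, and get_writer_batch_size shrinks the Parquet row group size for image, audio, and binary columns so that fetching a single row does not decode an oversized row group. A minimal sketch, assuming the public Dataset.to_parquet and Dataset.from_parquet entry points; the file name and columns are illustrative:

import datasets

ds = datasets.Dataset.from_dict({"id": list(range(10_000)), "text": ["x"] * 10_000})

# batch_size sets the row group size; left unset, get_writer_batch_size
# picks a smaller value automatically for image/audio/binary features.
ds.to_parquet("dump.parquet", batch_size=1000)

# Reading back goes through ParquetDatasetReader (non-streaming by default).
ds2 = datasets.Dataset.from_parquet("dump.parquet")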
llmeval-env/lib/python3.10/site-packages/datasets/io/spark.py ADDED
@@ -0,0 +1,57 @@
+ from typing import Optional
+
+ import pyspark
+
+ from .. import Features, NamedSplit
+ from ..download import DownloadMode
+ from ..packaged_modules.spark.spark import Spark
+ from .abc import AbstractDatasetReader
+
+
+ class SparkDatasetReader(AbstractDatasetReader):
+     """A dataset reader that reads from a Spark DataFrame.
+
+     When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
+     provided. When streaming, the DataFrame is read directly as an iterable dataset without writing a cache.
+     """
+
+     def __init__(
+         self,
+         df: pyspark.sql.DataFrame,
+         split: Optional[NamedSplit] = None,
+         features: Optional[Features] = None,
+         streaming: bool = True,
+         cache_dir: str = None,
+         keep_in_memory: bool = False,
+         working_dir: str = None,
+         load_from_cache_file: bool = True,
+         file_format: str = "arrow",
+         **kwargs,
+     ):
+         super().__init__(
+             split=split,
+             features=features,
+             cache_dir=cache_dir,
+             keep_in_memory=keep_in_memory,
+             streaming=streaming,
+             **kwargs,
+         )
+         self._load_from_cache_file = load_from_cache_file
+         self._file_format = file_format
+         self.builder = Spark(
+             df=df,
+             features=features,
+             cache_dir=cache_dir,
+             working_dir=working_dir,
+             **kwargs,
+         )
+
+     def read(self):
+         if self.streaming:
+             return self.builder.as_streaming_dataset(split=self.split)
+         download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
+         self.builder.download_and_prepare(
+             download_mode=download_mode,
+             file_format=self._file_format,
+         )
+         return self.builder.as_dataset(split=self.split)
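For completeness, SparkDatasetReader is the reader behind Dataset.from_spark: with streaming it exposes the DataFrame as an iterable dataset, otherwise the Arrow cache is materialized in parallel over Spark workers, which is why the cache directory must live on storage that both the driver and the executors can reach. A minimal sketch, assuming a running SparkSession and the public Dataset.from_spark entry point; the DataFrame contents and the cache path are illustrative:

from pyspark.sql import SparkSession

import datasets

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])

# Non-streaming: Spark workers write Arrow shards into cache_dir, so in a
# real cluster this should point at shared storage (e.g. an NFS mount).
ds = datasets.Dataset.from_spark(df, cache_dir="/tmp/hf_spark_cache")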