applied-ai-018 committed on
Commit fe3c857 · verified · 1 Parent(s): dffa97c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg.pt +3 -0
  7. venv/lib/python3.10/site-packages/datasets/__init__.py +70 -0
  8. venv/lib/python3.10/site-packages/datasets/arrow_dataset.py +0 -0
  9. venv/lib/python3.10/site-packages/datasets/arrow_reader.py +663 -0
  10. venv/lib/python3.10/site-packages/datasets/arrow_writer.py +746 -0
  11. venv/lib/python3.10/site-packages/datasets/builder.bak.py +0 -0
  12. venv/lib/python3.10/site-packages/datasets/builder.py +0 -0
  13. venv/lib/python3.10/site-packages/datasets/config.py +272 -0
  14. venv/lib/python3.10/site-packages/datasets/data_files.py +821 -0
  15. venv/lib/python3.10/site-packages/datasets/distributed.py +39 -0
  16. venv/lib/python3.10/site-packages/datasets/features/__init__.py +20 -0
  17. venv/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/datasets/features/audio.py +277 -0
  23. venv/lib/python3.10/site-packages/datasets/features/features.py +2202 -0
  24. venv/lib/python3.10/site-packages/datasets/features/image.py +383 -0
  25. venv/lib/python3.10/site-packages/datasets/features/translation.py +129 -0
  26. venv/lib/python3.10/site-packages/datasets/filesystems/__init__.py +69 -0
  27. venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/datasets/filesystems/compression.py +123 -0
  31. venv/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py +116 -0
  32. venv/lib/python3.10/site-packages/datasets/fingerprint.py +494 -0
  33. venv/lib/python3.10/site-packages/datasets/info.py +593 -0
  34. venv/lib/python3.10/site-packages/datasets/iterable_dataset.py +0 -0
  35. venv/lib/python3.10/site-packages/datasets/keyhash.py +104 -0
  36. venv/lib/python3.10/site-packages/datasets/load.py +0 -0
  37. venv/lib/python3.10/site-packages/datasets/metric.py +652 -0
  38. venv/lib/python3.10/site-packages/datasets/naming.py +84 -0
  39. venv/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py +71 -0
  40. venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py +74 -0
  44. venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py +0 -0
  45. venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py +68 -0
  48. venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py +0 -0
  49. venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc +0 -0
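The checkpoint entries above are PyTorch .pt files tracked with Git LFS (only their pointer diffs appear below). A minimal inspection sketch, assuming the LFS objects have been pulled locally and that each shard deserializes with a plain torch.load; neither assumption is stated by the commit itself:

# Hypothetical: inspect one optimizer-state shard listed in this commit.
# Requires `git lfs pull` so the real tensor data is present locally.
import torch

path = "ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt"
state = torch.load(path, map_location="cpu")  # a tensor or a dict, depending on how it was saved

if isinstance(state, torch.Tensor):
    print(path, tuple(state.shape), state.dtype)
else:
    for key, value in state.items():
        print(key, getattr(value, "shape", type(value)))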
ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f90617ba006aed49375457946eab401c0c7ee4953e2214115c931ca580cfca89
+ size 33555627
ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77fc38238a48dd4480293df4bf9feae153c0490b3bcd8cf9932f43d8c676516a
+ size 9372
ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:adc3909ccbac7d2fad5193d34924ce0b6a18673a28ffbd4d854aee83506cf9e2
+ size 9293
ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f247f9cad8f6a73ab8bda6461a4a4c3f94166313820616b88aa28684e183f38
+ size 33555627
ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d00e1626357023b7d7af5969a0ca7c09611c5aa31e7abd528e8486ae0e0db25
+ size 33555533
ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b5a6c2d362fd03c23d099b511d7d4c92c5f62c53c6c6877e40582573ae408b7
+ size 50332828
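Each pointer diff above follows the three-line Git LFS pointer format: a spec version line, an "oid sha256:<hex>" line, and a "size <bytes>" line. A small parsing sketch for that format; the helper name is illustrative and not part of the commit:

# Parse the Git LFS pointer layout shown above (version / oid / size).
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:9b5a6c2d362fd03c23d099b511d7d4c92c5f62c53c6c6877e40582573ae408b7
size 50332828"""
print(parse_lfs_pointer(pointer))  # {'version': '...', 'sha256': '9b5a...', 'size_bytes': 50332828}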
venv/lib/python3.10/site-packages/datasets/__init__.py ADDED
@@ -0,0 +1,70 @@
+ # ruff: noqa
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ __version__ = "2.19.0"
+
+ from .arrow_dataset import Dataset
+ from .arrow_reader import ReadInstruction
+ from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
+ from .combine import concatenate_datasets, interleave_datasets
+ from .dataset_dict import DatasetDict, IterableDatasetDict
+ from .download import *
+ from .features import *
+ from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
+ from .info import DatasetInfo, MetricInfo
+ from .inspect import (
+     get_dataset_config_info,
+     get_dataset_config_names,
+     get_dataset_default_config_name,
+     get_dataset_infos,
+     get_dataset_split_names,
+     inspect_dataset,
+     inspect_metric,
+     list_datasets,
+     list_metrics,
+ )
+ from .iterable_dataset import IterableDataset
+ from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
+ from .metric import Metric
+ from .splits import (
+     NamedSplit,
+     NamedSplitAll,
+     Split,
+     SplitBase,
+     SplitDict,
+     SplitGenerator,
+     SplitInfo,
+     SubSplitInfo,
+     percent,
+ )
+ from .tasks import *
+ from .utils import *
+ from .utils import logging
+
+
+ # deprecated modules
+ from datasets import arrow_dataset as _arrow_dataset  # isort:skip
+ from datasets import utils as _utils  # isort:skip
+ from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip
+
+ _arrow_dataset.concatenate_datasets = concatenate_datasets
+ _utils.DownloadConfig = DownloadConfig
+ _utils.DownloadManager = DownloadManager
+ _utils.DownloadMode = DownloadMode
+ _deprecated_download_manager.DownloadConfig = DownloadConfig
+ _deprecated_download_manager.DownloadMode = DownloadMode
+ _deprecated_download_manager.DownloadManager = DownloadManager
+
+ del _arrow_dataset, _utils, _deprecated_download_manager
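The __init__.py above pins datasets 2.19.0 and re-exports the public API (load_dataset, Dataset, DatasetDict, ReadInstruction, and so on). A minimal usage sketch against that surface; the "mnist" dataset name is taken from the ReadInstruction docstring further down in this commit, and running it needs the bundled package plus cached or downloadable data:

# Minimal sketch using the API re-exported by the __init__.py above.
import datasets

print(datasets.__version__)  # "2.19.0" per the diff

# The split spec string is parsed by ReadInstruction (see arrow_reader.py below).
ds = datasets.load_dataset("mnist", split="train[:1%]")
print(ds.features)
print(len(ds))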
venv/lib/python3.10/site-packages/datasets/arrow_dataset.py ADDED
The diff for this file is too large to render.
venv/lib/python3.10/site-packages/datasets/arrow_reader.py ADDED
@@ -0,0 +1,663 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Arrow ArrowReader."""
17
+
18
+ import copy
19
+ import math
20
+ import os
21
+ import re
22
+ import shutil
23
+ from dataclasses import dataclass
24
+ from functools import partial
25
+ from pathlib import Path
26
+ from typing import TYPE_CHECKING, List, Optional, Union
27
+
28
+ import pyarrow as pa
29
+ import pyarrow.parquet as pq
30
+ from tqdm.contrib.concurrent import thread_map
31
+
32
+ from .download.download_config import DownloadConfig
33
+ from .naming import _split_re, filenames_for_dataset_split
34
+ from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
35
+ from .utils import logging
36
+ from .utils import tqdm as hf_tqdm
37
+ from .utils.deprecation_utils import deprecated
38
+ from .utils.file_utils import cached_path
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .info import DatasetInfo # noqa: F401
43
+ from .splits import Split, SplitInfo # noqa: F401
44
+
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+ HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
49
+
50
+ _SUB_SPEC_RE = re.compile(
51
+ rf"""
52
+ ^
53
+ (?P<split>{_split_re[1:-1]})
54
+ (\[
55
+ ((?P<from>-?\d+)
56
+ (?P<from_pct>%)?)?
57
+ :
58
+ ((?P<to>-?\d+)
59
+ (?P<to_pct>%)?)?
60
+ \])?(\((?P<rounding>[^\)]*)\))?
61
+ $
62
+ """, # remove ^ and $
63
+ re.X,
64
+ )
65
+
66
+ _ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
67
+
68
+
69
+ class DatasetNotOnHfGcsError(ConnectionError):
70
+ """When you can't get the dataset from the Hf google cloud storage"""
71
+
72
+ pass
73
+
74
+
75
+ class MissingFilesOnHfGcsError(ConnectionError):
76
+ """When some files are missing on the Hf oogle cloud storage"""
77
+
78
+ pass
79
+
80
+
81
+ @dataclass(frozen=True)
82
+ class FileInstructions:
83
+ """The file instructions associated with a split ReadInstruction.
84
+
85
+ Attributes:
86
+ num_examples: `int`, The total number of examples
87
+ file_instructions: List[dict(filename, skip, take)], the files information.
88
+ The filenames contain the relative path, not absolute.
89
+ skip/take indicates which example read in the file: `ds.slice(skip, take)`
90
+ """
91
+
92
+ num_examples: int
93
+ file_instructions: List[dict]
94
+
95
+
96
+ def make_file_instructions(
97
+ name: str,
98
+ split_infos: List["SplitInfo"],
99
+ instruction: Union[str, "ReadInstruction"],
100
+ filetype_suffix: Optional[str] = None,
101
+ prefix_path: Optional[str] = None,
102
+ ) -> FileInstructions:
103
+ """Returns instructions of the split dict.
104
+
105
+ Args:
106
+ name (`str`): Name of the dataset.
107
+ split_infos (`list` of `[SplitInfo]`): Dataset splits information.
108
+ instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset.
109
+ filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'.
110
+ prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name.
111
+
112
+ Returns:
113
+ [`FileInstructions`]
114
+ """
115
+ if not isinstance(name, str):
116
+ raise TypeError(f"Expected str 'name', but got: {type(name).__name__}")
117
+ elif not name:
118
+ raise ValueError("Expected non-empty str 'name'")
119
+ name2len = {info.name: info.num_examples for info in split_infos}
120
+ name2shard_lengths = {info.name: info.shard_lengths for info in split_infos}
121
+ name2filenames = {
122
+ info.name: filenames_for_dataset_split(
123
+ path=prefix_path,
124
+ dataset_name=name,
125
+ split=info.name,
126
+ filetype_suffix=filetype_suffix,
127
+ shard_lengths=name2shard_lengths[info.name],
128
+ )
129
+ for info in split_infos
130
+ }
131
+ if not isinstance(instruction, ReadInstruction):
132
+ instruction = ReadInstruction.from_spec(instruction)
133
+ # Create the absolute instruction (per split)
134
+ absolute_instructions = instruction.to_absolute(name2len)
135
+
136
+ # For each split, return the files instruction (skip/take)
137
+ file_instructions = []
138
+ num_examples = 0
139
+ for abs_instr in absolute_instructions:
140
+ split_length = name2len[abs_instr.splitname]
141
+ filenames = name2filenames[abs_instr.splitname]
142
+ shard_lengths = name2shard_lengths[abs_instr.splitname]
143
+ from_ = 0 if abs_instr.from_ is None else abs_instr.from_
144
+ to = split_length if abs_instr.to is None else abs_instr.to
145
+ if shard_lengths is None: # not sharded
146
+ for filename in filenames:
147
+ take = to - from_
148
+ if take == 0:
149
+ continue
150
+ num_examples += take
151
+ file_instructions.append({"filename": filename, "skip": from_, "take": take})
152
+ else: # sharded
153
+ index_start = 0 # Beginning (included) of moving window.
154
+ index_end = 0 # End (excluded) of moving window.
155
+ for filename, shard_length in zip(filenames, shard_lengths):
156
+ index_end += shard_length
157
+ if from_ < index_end and to > index_start: # There is something to take.
158
+ skip = from_ - index_start if from_ > index_start else 0
159
+ take = to - index_start - skip if to < index_end else -1
160
+ if take == 0:
161
+ continue
162
+ file_instructions.append({"filename": filename, "skip": skip, "take": take})
163
+ num_examples += shard_length - skip if take == -1 else take
164
+ index_start += shard_length
165
+ return FileInstructions(
166
+ num_examples=num_examples,
167
+ file_instructions=file_instructions,
168
+ )
169
+
170
+
171
+ class BaseReader:
172
+ """
173
+ Build a Dataset object out of Instruction instance(s).
174
+ """
175
+
176
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
177
+ """Initializes ArrowReader.
178
+
179
+ Args:
180
+ path (str): path where tfrecords are stored.
181
+ info (DatasetInfo): info about the dataset.
182
+ """
183
+ self._path: str = path
184
+ self._info: Optional["DatasetInfo"] = info
185
+ self._filetype_suffix: Optional[str] = None
186
+
187
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
188
+ """Returns a Dataset instance from given (filename, skip, take)."""
189
+ raise NotImplementedError
190
+
191
+ def _read_files(self, files, in_memory=False) -> Table:
192
+ """Returns Dataset for given file instructions.
193
+
194
+ Args:
195
+ files: List[dict(filename, skip, take)], the files information.
196
+ The filenames contain the absolute path, not relative.
197
+ skip/take indicates which example read in the file: `ds.slice(skip, take)`
198
+ in_memory (bool, default False): Whether to copy the data in-memory.
199
+ """
200
+ if len(files) == 0 or not all(isinstance(f, dict) for f in files):
201
+ raise ValueError("please provide valid file informations")
202
+ files = copy.deepcopy(files)
203
+ for f in files:
204
+ f["filename"] = os.path.join(self._path, f["filename"])
205
+
206
+ pa_tables = thread_map(
207
+ partial(self._get_table_from_filename, in_memory=in_memory),
208
+ files,
209
+ tqdm_class=hf_tqdm,
210
+ desc="Loading dataset shards",
211
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
212
+ disable=len(files) <= 16 or None,
213
+ )
214
+ pa_tables = [t for t in pa_tables if len(t) > 0]
215
+ if not pa_tables and (self._info is None or self._info.features is None):
216
+ raise ValueError(
217
+ "Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
218
+ )
219
+ pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
220
+ pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
221
+ return pa_table
222
+
223
+ def get_file_instructions(self, name, instruction, split_infos):
224
+ """Return list of dict {'filename': str, 'skip': int, 'take': int}"""
225
+ file_instructions = make_file_instructions(
226
+ name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
227
+ )
228
+ files = file_instructions.file_instructions
229
+ return files
230
+
231
+ def read(
232
+ self,
233
+ name,
234
+ instructions,
235
+ split_infos,
236
+ in_memory=False,
237
+ ):
238
+ """Returns Dataset instance(s).
239
+
240
+ Args:
241
+ name (str): name of the dataset.
242
+ instructions (ReadInstruction): instructions to read.
243
+ Instruction can be string and will then be passed to the Instruction
244
+ constructor as is.
245
+ split_infos (list of SplitInfo proto): the available splits for dataset.
246
+ in_memory (bool, default False): Whether to copy the data in-memory.
247
+
248
+ Returns:
249
+ kwargs to build a single Dataset instance.
250
+ """
251
+
252
+ files = self.get_file_instructions(name, instructions, split_infos)
253
+ if not files:
254
+ msg = f'Instruction "{instructions}" corresponds to no data!'
255
+ raise ValueError(msg)
256
+ return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
257
+
258
+ def read_files(
259
+ self,
260
+ files: List[dict],
261
+ original_instructions: Union[None, "ReadInstruction", "Split"] = None,
262
+ in_memory=False,
263
+ ):
264
+ """Returns single Dataset instance for the set of file instructions.
265
+
266
+ Args:
267
+ files: List[dict(filename, skip, take)], the files information.
268
+ The filenames contain the relative path, not absolute.
269
+ skip/take indicates which example read in the file: `ds.skip().take()`
270
+ original_instructions: store the original instructions used to build the dataset split in the dataset.
271
+ in_memory (bool, default False): Whether to copy the data in-memory.
272
+
273
+ Returns:
274
+ kwargs to build a Dataset instance.
275
+ """
276
+ # Prepend path to filename
277
+ pa_table = self._read_files(files, in_memory=in_memory)
278
+ # If original_instructions is not None, convert it to a human-readable NamedSplit
279
+ if original_instructions is not None:
280
+ from .splits import Split # noqa
281
+
282
+ split = Split(str(original_instructions))
283
+ else:
284
+ split = None
285
+ dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
286
+ return dataset_kwargs
287
+
288
+ @deprecated()
289
+ def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir):
290
+ """
291
+ Download the dataset files from the Hf GCS
292
+
293
+ Args:
294
+ dl_cache_dir: `str`, the local cache directory used to download files
295
+ relative_data_dir: `str`, the relative directory of the remote files from
296
+ the `datasets` directory on GCS.
297
+
298
+ """
299
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
300
+ try:
301
+ remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
302
+ downloaded_dataset_info = cached_path(
303
+ remote_dataset_info.replace(os.sep, "/"), download_config=download_config
304
+ )
305
+ shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json"))
306
+ if self._info is not None:
307
+ self._info.update(self._info.from_directory(self._path))
308
+ except FileNotFoundError as err:
309
+ raise DatasetNotOnHfGcsError(err) from None
310
+ try:
311
+ for split in self._info.splits:
312
+ file_instructions = self.get_file_instructions(
313
+ name=self._info.builder_name,
314
+ instruction=split,
315
+ split_infos=self._info.splits.values(),
316
+ )
317
+ for file_instruction in file_instructions:
318
+ file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path))
319
+ remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download)
320
+ downloaded_prepared_filename = cached_path(
321
+ remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
322
+ )
323
+ shutil.move(downloaded_prepared_filename, file_instruction["filename"])
324
+ except FileNotFoundError as err:
325
+ raise MissingFilesOnHfGcsError(err) from None
326
+
327
+
328
+ class ArrowReader(BaseReader):
329
+ """
330
+ Build a Dataset object out of Instruction instance(s).
331
+ This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
332
+ """
333
+
334
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
335
+ """Initializes ArrowReader.
336
+
337
+ Args:
338
+ path (str): path where Arrow files are stored.
339
+ info (DatasetInfo): info about the dataset.
340
+ """
341
+ super().__init__(path, info)
342
+ self._filetype_suffix = "arrow"
343
+
344
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
345
+ """Returns a Dataset instance from given (filename, skip, take)."""
346
+ filename, skip, take = (
347
+ filename_skip_take["filename"],
348
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
349
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
350
+ )
351
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
352
+ if take == -1:
353
+ take = len(table) - skip
354
+ # here we don't want to slice an empty table, or it may segfault
355
+ if skip is not None and take is not None and not (skip == 0 and take == len(table)):
356
+ table = table.slice(skip, take)
357
+ return table
358
+
359
+ @staticmethod
360
+ def read_table(filename, in_memory=False) -> Table:
361
+ """
362
+ Read table from file.
363
+
364
+ Args:
365
+ filename (str): File name of the table.
366
+ in_memory (bool, default=False): Whether to copy the data in-memory.
367
+
368
+ Returns:
369
+ pyarrow.Table
370
+ """
371
+ table_cls = InMemoryTable if in_memory else MemoryMappedTable
372
+ return table_cls.from_file(filename)
373
+
374
+
375
+ class ParquetReader(BaseReader):
376
+ """
377
+ Build a Dataset object out of Instruction instance(s).
378
+ This Reader uses memory mapping on parquet files.
379
+ """
380
+
381
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
382
+ """Initializes ParquetReader.
383
+
384
+ Args:
385
+ path (str): path where tfrecords are stored.
386
+ info (DatasetInfo): info about the dataset.
387
+ """
388
+ super().__init__(path, info)
389
+ self._filetype_suffix = "parquet"
390
+
391
+ def _get_table_from_filename(self, filename_skip_take, **kwargs):
392
+ """Returns a Dataset instance from given (filename, skip, take)."""
393
+ filename, skip, take = (
394
+ filename_skip_take["filename"],
395
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
396
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
397
+ )
398
+ # Parquet read_table always loads data in memory, independently of memory_map
399
+ pa_table = pq.read_table(filename, memory_map=True)
400
+ # here we don't want to slice an empty table, or it may segfault
401
+ if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
402
+ pa_table = pa_table.slice(skip, take)
403
+ return pa_table
404
+
405
+
406
+ @dataclass(frozen=True)
407
+ class _AbsoluteInstruction:
408
+ """A machine friendly slice: defined absolute positive boundaries."""
409
+
410
+ splitname: str
411
+ from_: int # uint (starting index).
412
+ to: int # uint (ending index).
413
+
414
+
415
+ @dataclass(frozen=True)
416
+ class _RelativeInstruction:
417
+ """Represents a single parsed slicing instruction, can use % and negatives."""
418
+
419
+ splitname: str
420
+ from_: Optional[int] = None # int (starting index) or None if no lower boundary.
421
+ to: Optional[int] = None # int (ending index) or None if no upper boundary.
422
+ unit: Optional[str] = None
423
+ rounding: Optional[str] = None
424
+
425
+ def __post_init__(self):
426
+ if self.unit is not None and self.unit not in ["%", "abs"]:
427
+ raise ValueError("unit must be either % or abs")
428
+ if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
429
+ raise ValueError("rounding must be either closest or pct1_dropremainder")
430
+ if self.unit != "%" and self.rounding is not None:
431
+ raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
432
+ if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
433
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
434
+ if self.unit == "%" and self.to is not None and abs(self.to) > 100:
435
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
436
+ # Update via __dict__ due to instance being "frozen"
437
+ self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding
438
+
439
+
440
+ def _str_to_read_instruction(spec):
441
+ """Returns ReadInstruction for given string."""
442
+ res = _SUB_SPEC_RE.match(spec)
443
+ if not res:
444
+ raise ValueError(f"Unrecognized instruction format: {spec}")
445
+ unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
446
+ return ReadInstruction(
447
+ split_name=res.group("split"),
448
+ rounding=res.group("rounding"),
449
+ from_=int(res.group("from")) if res.group("from") else None,
450
+ to=int(res.group("to")) if res.group("to") else None,
451
+ unit=unit,
452
+ )
453
+
454
+
455
+ def _pct_to_abs_pct1(boundary, num_examples):
456
+ # Using math.trunc here, since -99.5% should give -99%, not -100%.
457
+ if num_examples < 100:
458
+ msg = (
459
+ 'Using "pct1_dropremainder" rounding on a split with less than 100 '
460
+ "elements is forbidden: it always results in an empty dataset."
461
+ )
462
+ raise ValueError(msg)
463
+ return boundary * math.trunc(num_examples / 100.0)
464
+
465
+
466
+ def _pct_to_abs_closest(boundary, num_examples):
467
+ return int(round(boundary * num_examples / 100.0))
468
+
469
+
470
+ def _rel_to_abs_instr(rel_instr, name2len):
471
+ """Returns _AbsoluteInstruction instance for given RelativeInstruction.
472
+
473
+ Args:
474
+ rel_instr: RelativeInstruction instance.
475
+ name2len: dict {split_name: num_examples}.
476
+ """
477
+ pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
478
+ split = rel_instr.splitname
479
+ if split not in name2len:
480
+ raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
481
+ num_examples = name2len[split]
482
+ from_ = rel_instr.from_
483
+ to = rel_instr.to
484
+ if rel_instr.unit == "%":
485
+ from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
486
+ to = num_examples if to is None else pct_to_abs(to, num_examples)
487
+ else:
488
+ from_ = 0 if from_ is None else from_
489
+ to = num_examples if to is None else to
490
+ if from_ < 0:
491
+ from_ = max(num_examples + from_, 0)
492
+ if to < 0:
493
+ to = max(num_examples + to, 0)
494
+ from_ = min(from_, num_examples)
495
+ to = min(to, num_examples)
496
+ return _AbsoluteInstruction(split, from_, to)
497
+
498
+
499
+ class ReadInstruction:
500
+ """Reading instruction for a dataset.
501
+
502
+ Examples::
503
+
504
+ # The following lines are equivalent:
505
+ ds = datasets.load_dataset('mnist', split='test[:33%]')
506
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
507
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
508
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
509
+ 'test', from_=0, to=33, unit='%'))
510
+
511
+ # The following lines are equivalent:
512
+ ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
513
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
514
+ 'test[:33%]+train[1:-1]'))
515
+ ds = datasets.load_dataset('mnist', split=(
516
+ datasets.ReadInstruction('test', to=33, unit='%') +
517
+ datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
518
+
519
+ # The following lines are equivalent:
520
+ ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
521
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
522
+ 'test[:33%](pct1_dropremainder)'))
523
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
524
+ 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
525
+
526
+ # 10-fold validation:
527
+ tests = datasets.load_dataset(
528
+ 'mnist',
529
+ [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
530
+ for k in range(0, 100, 10)])
531
+ trains = datasets.load_dataset(
532
+ 'mnist',
533
+ [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
534
+ for k in range(0, 100, 10)])
535
+
536
+ """
537
+
538
+ def _init(self, relative_instructions):
539
+ # Private initializer.
540
+ self._relative_instructions = relative_instructions
541
+
542
+ @classmethod
543
+ def _read_instruction_from_relative_instructions(cls, relative_instructions):
544
+ """Returns ReadInstruction obj initialized with relative_instructions."""
545
+ # Use __new__ to bypass __init__ used by public API and not convenient here.
546
+ result = cls.__new__(cls)
547
+ result._init(relative_instructions) # pylint: disable=protected-access
548
+ return result
549
+
550
+ def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
551
+ """Initialize ReadInstruction.
552
+
553
+ Args:
554
+ split_name (str): name of the split to read. Eg: 'train'.
555
+ rounding (str, optional): The rounding behaviour to use when percent slicing is
556
+ used. Ignored when slicing with absolute indices.
557
+ Possible values:
558
+ - 'closest' (default): The specified percentages are rounded to the
559
+ closest value. Use this if you want specified percents to be as
560
+ much exact as possible.
561
+ - 'pct1_dropremainder': the specified percentages are treated as
562
+ multiple of 1%. Use this option if you want consistency. Eg:
563
+ len(5%) == 5 * len(1%).
564
+ Using this option, one might not be able to use the full set of
565
+ examples, if the number of those is not a multiple of 100.
566
+ from_ (int):
567
+ to (int): alternative way of specifying slicing boundaries. If any of
568
+ {from_, to, unit} argument is used, slicing cannot be specified as
569
+ string.
570
+ unit (str): optional, one of:
571
+ '%': to set the slicing unit as percents of the split size.
572
+ 'abs': to set the slicing unit as absolute numbers.
573
+ """
574
+ # This constructor is not always called. See factory method
575
+ # `_read_instruction_from_relative_instructions`. Common init instructions
576
+ # MUST be placed in the _init method.
577
+ self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
578
+
579
+ @classmethod
580
+ def from_spec(cls, spec):
581
+ """Creates a `ReadInstruction` instance out of a string spec.
582
+
583
+ Args:
584
+ spec (`str`):
585
+ Split(s) + optional slice(s) to read + optional rounding
586
+ if percents are used as the slicing unit. A slice can be specified,
587
+ using absolute numbers (`int`) or percentages (`int`).
588
+
589
+ Examples:
590
+
591
+ ```
592
+ test: test split.
593
+ test + validation: test split + validation split.
594
+ test[10:]: test split, minus its first 10 records.
595
+ test[:10%]: first 10% records of test split.
596
+ test[:20%](pct1_dropremainder): first 10% records, rounded with the pct1_dropremainder rounding.
597
+ test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
598
+ ```
599
+
600
+ Returns:
601
+ ReadInstruction instance.
602
+ """
603
+ spec = str(spec) # Need to convert to str in case of NamedSplit instance.
604
+ subs = _ADDITION_SEP_RE.split(spec)
605
+ if not subs:
606
+ raise ValueError(f"No instructions could be built out of {spec}")
607
+ instruction = _str_to_read_instruction(subs[0])
608
+ return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
609
+
610
+ def to_spec(self):
611
+ rel_instr_specs = []
612
+ for rel_instr in self._relative_instructions:
613
+ rel_instr_spec = rel_instr.splitname
614
+ if rel_instr.from_ is not None or rel_instr.to is not None:
615
+ from_ = rel_instr.from_
616
+ to = rel_instr.to
617
+ unit = rel_instr.unit
618
+ rounding = rel_instr.rounding
619
+ unit = unit if unit == "%" else ""
620
+ from_ = str(from_) + unit if from_ is not None else ""
621
+ to = str(to) + unit if to is not None else ""
622
+ slice_str = f"[{from_}:{to}]"
623
+ rounding_str = (
624
+ f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
625
+ )
626
+ rel_instr_spec += slice_str + rounding_str
627
+ rel_instr_specs.append(rel_instr_spec)
628
+ return "+".join(rel_instr_specs)
629
+
630
+ def __add__(self, other):
631
+ """Returns a new ReadInstruction obj, result of appending other to self."""
632
+ if not isinstance(other, ReadInstruction):
633
+ msg = "ReadInstruction can only be added to another ReadInstruction obj."
634
+ raise TypeError(msg)
635
+ self_ris = self._relative_instructions
636
+ other_ris = other._relative_instructions # pylint: disable=protected-access
637
+ if (
638
+ self_ris[0].unit != "abs"
639
+ and other_ris[0].unit != "abs"
640
+ and self._relative_instructions[0].rounding != other_ris[0].rounding
641
+ ):
642
+ raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
643
+ return self._read_instruction_from_relative_instructions(self_ris + other_ris)
644
+
645
+ def __str__(self):
646
+ return self.to_spec()
647
+
648
+ def __repr__(self):
649
+ return f"ReadInstruction({self._relative_instructions})"
650
+
651
+ def to_absolute(self, name2len):
652
+ """Translate instruction into a list of absolute instructions.
653
+
654
+ Those absolute instructions are then to be added together.
655
+
656
+ Args:
657
+ name2len (`dict`):
658
+ Associating split names to number of examples.
659
+
660
+ Returns:
661
+ list of _AbsoluteInstruction instances (corresponds to the + in spec).
662
+ """
663
+ return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
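For reference, a short sketch of how the ReadInstruction machinery defined above turns a split spec into absolute boundaries; the name2len split sizes are made up for illustration, while the calls mirror from_spec, to_spec, and to_absolute as written in this diff:

# Sketch: split spec string -> ReadInstruction -> absolute slice boundaries.
from datasets.arrow_reader import ReadInstruction

ri = ReadInstruction.from_spec("test[:33%]+train[40%:60%]")  # '+' concatenates sub-instructions
print(ri.to_spec())  # round-trips to "test[:33%]+train[40%:60%]"

name2len = {"train": 60000, "test": 10000}  # illustrative split sizes
for abs_instr in ri.to_absolute(name2len):
    print(abs_instr.splitname, abs_instr.from_, abs_instr.to)
# test 0 3300
# train 24000 36000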
venv/lib/python3.10/site-packages/datasets/arrow_writer.py ADDED
@@ -0,0 +1,746 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ # Lint as: python3
14
+ """To write records into Parquet files."""
15
+
16
+ import errno
17
+ import json
18
+ import os
19
+ import sys
20
+ from pathlib import Path
21
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
22
+
23
+ import fsspec
24
+ import numpy as np
25
+ import pyarrow as pa
26
+ import pyarrow.parquet as pq
27
+ from fsspec.core import url_to_fs
28
+
29
+ from . import config
30
+ from .features import Features, Image, Value
31
+ from .features.features import (
32
+ FeatureType,
33
+ _ArrayXDExtensionType,
34
+ cast_to_python_objects,
35
+ generate_from_arrow_type,
36
+ get_nested_type,
37
+ list_of_np_array_to_pyarrow_listarray,
38
+ numpy_to_pyarrow_listarray,
39
+ to_pyarrow_listarray,
40
+ )
41
+ from .filesystems import is_remote_filesystem
42
+ from .info import DatasetInfo
43
+ from .keyhash import DuplicatedKeysError, KeyHasher
44
+ from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast
45
+ from .utils import logging
46
+ from .utils import tqdm as hf_tqdm
47
+ from .utils.file_utils import hash_url_to_filename
48
+ from .utils.py_utils import asdict, first_non_null_value
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ type_ = type # keep python's type function
54
+
55
+
56
+ class SchemaInferenceError(ValueError):
57
+ pass
58
+
59
+
60
+ class TypedSequence:
61
+ """
62
+ This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
63
+
64
+ More specifically it adds several features:
65
+ - Support extension types like ``datasets.features.Array2DExtensionType``:
66
+ By default pyarrow arrays don't return extension arrays. One has to call
67
+ ``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
68
+ in order to get an extension array.
69
+ - Support for ``try_type`` parameter that can be used instead of ``type``:
70
+ When an array is transformed, we like to keep the same type as before if possible.
71
+ For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
72
+ of each column by default.
73
+ - Better error message when a pyarrow array overflows.
74
+
75
+ Example::
76
+
77
+ from datasets.features import Array2D, Array2DExtensionType, Value
78
+ from datasets.arrow_writer import TypedSequence
79
+ import pyarrow as pa
80
+
81
+ arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
82
+ assert arr.type == pa.int32()
83
+
84
+ arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
85
+ assert arr.type == pa.int32()
86
+
87
+ arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
88
+ assert arr.type == pa.string()
89
+
90
+ arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
91
+ assert arr.type == Array2DExtensionType((1, 3), "int64")
92
+
93
+ table = pa.Table.from_pydict({
94
+ "image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
95
+ })
96
+ assert table["image"].type == Array2DExtensionType((1, 3), "int64")
97
+
98
+ """
99
+
100
+ def __init__(
101
+ self,
102
+ data: Iterable,
103
+ type: Optional[FeatureType] = None,
104
+ try_type: Optional[FeatureType] = None,
105
+ optimized_int_type: Optional[FeatureType] = None,
106
+ ):
107
+ # assert type is None or try_type is None,
108
+ if type is not None and try_type is not None:
109
+ raise ValueError("You cannot specify both type and try_type")
110
+ # set attributes
111
+ self.data = data
112
+ self.type = type
113
+ self.try_type = try_type # is ignored if it doesn't match the data
114
+ self.optimized_int_type = optimized_int_type
115
+ # when trying a type (is ignored if data is not compatible)
116
+ self.trying_type = self.try_type is not None
117
+ self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
118
+ # used to get back the inferred type after __arrow_array__() is called once
119
+ self._inferred_type = None
120
+
121
+ def get_inferred_type(self) -> FeatureType:
122
+ """Return the inferred feature type.
123
+ This is done by converting the sequence to an Arrow array, and getting the corresponding
124
+ feature type.
125
+
126
+ Since building the Arrow array can be expensive, the value of the inferred type is cached
127
+ as soon as pa.array is called on the typed sequence.
128
+
129
+ Returns:
130
+ FeatureType: inferred feature type of the sequence.
131
+ """
132
+ if self._inferred_type is None:
133
+ self._inferred_type = generate_from_arrow_type(pa.array(self).type)
134
+ return self._inferred_type
135
+
136
+ @staticmethod
137
+ def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
138
+ """Implement type inference for custom objects like PIL.Image.Image -> Image type.
139
+
140
+ This function is only used for custom python objects that can't be direclty passed to build
141
+ an Arrow array. In such cases is infers the feature type to use, and it encodes the data so
142
+ that they can be passed to an Arrow array.
143
+
144
+ Args:
145
+ data (Iterable): array of data to infer the type, e.g. a list of PIL images.
146
+
147
+ Returns:
148
+ Tuple[Iterable, Optional[FeatureType]]: a tuple with:
149
+ - the (possibly encoded) array, if the inferred feature type requires encoding
150
+ - the inferred feature type if the array is made of supported custom objects like
151
+ PIL images, else None.
152
+ """
153
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
154
+ import PIL.Image
155
+
156
+ non_null_idx, non_null_value = first_non_null_value(data)
157
+ if isinstance(non_null_value, PIL.Image.Image):
158
+ return [Image().encode_example(value) if value is not None else None for value in data], Image()
159
+ return data, None
160
+
161
+ def __arrow_array__(self, type: Optional[pa.DataType] = None):
162
+ """This function is called when calling pa.array(typed_sequence)"""
163
+
164
+ if type is not None:
165
+ raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
166
+ del type # make sure we don't use it
167
+ data = self.data
168
+ # automatic type inference for custom objects
169
+ if self.type is None and self.try_type is None:
170
+ data, self._inferred_type = self._infer_custom_type_and_encode(data)
171
+ if self._inferred_type is None:
172
+ type = self.try_type if self.trying_type else self.type
173
+ else:
174
+ type = self._inferred_type
175
+ pa_type = get_nested_type(type) if type is not None else None
176
+ optimized_int_pa_type = (
177
+ get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
178
+ )
179
+ trying_cast_to_python_objects = False
180
+ try:
181
+ # custom pyarrow types
182
+ if isinstance(pa_type, _ArrayXDExtensionType):
183
+ storage = to_pyarrow_listarray(data, pa_type)
184
+ return pa.ExtensionArray.from_storage(pa_type, storage)
185
+
186
+ # efficient np array to pyarrow array
187
+ if isinstance(data, np.ndarray):
188
+ out = numpy_to_pyarrow_listarray(data)
189
+ elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
190
+ out = list_of_np_array_to_pyarrow_listarray(data)
191
+ else:
192
+ trying_cast_to_python_objects = True
193
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
194
+ # use smaller integer precisions if possible
195
+ if self.trying_int_optimization:
196
+ if pa.types.is_int64(out.type):
197
+ out = out.cast(optimized_int_pa_type)
198
+ elif pa.types.is_list(out.type):
199
+ if pa.types.is_int64(out.type.value_type):
200
+ out = array_cast(out, pa.list_(optimized_int_pa_type))
201
+ elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
202
+ out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
203
+ # otherwise we can finally use the user's type
204
+ elif type is not None:
205
+ # We use cast_array_to_feature to support casting to custom types like Audio and Image
206
+ # Also, when trying type "string", we don't want to convert integers or floats to "string".
207
+ # We only do it if trying_type is False - since this is what the user asks for.
208
+ out = cast_array_to_feature(
209
+ out, type, allow_primitive_to_str=not self.trying_type, allow_decimal_to_str=not self.trying_type
210
+ )
211
+ return out
212
+ except (
213
+ TypeError,
214
+ pa.lib.ArrowInvalid,
215
+ pa.lib.ArrowNotImplementedError,
216
+ ) as e: # handle type errors and overflows
217
+ # Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
218
+ if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
219
+ raise
220
+
221
+ if self.trying_type:
222
+ try: # second chance
223
+ if isinstance(data, np.ndarray):
224
+ return numpy_to_pyarrow_listarray(data)
225
+ elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
226
+ return list_of_np_array_to_pyarrow_listarray(data)
227
+ else:
228
+ trying_cast_to_python_objects = True
229
+ return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
230
+ except pa.lib.ArrowInvalid as e:
231
+ if "overflow" in str(e):
232
+ raise OverflowError(
233
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
234
+ ) from None
235
+ elif self.trying_int_optimization and "not in range" in str(e):
236
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
237
+ logger.info(
238
+ f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
239
+ )
240
+ return out
241
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
242
+ out = pa.array(
243
+ cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
244
+ )
245
+ if type is not None:
246
+ out = cast_array_to_feature(
247
+ out, type, allow_primitive_to_str=True, allow_decimal_to_str=True
248
+ )
249
+ return out
250
+ else:
251
+ raise
252
+ elif "overflow" in str(e):
253
+ raise OverflowError(
254
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
255
+ ) from None
256
+ elif self.trying_int_optimization and "not in range" in str(e):
257
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
258
+ logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
259
+ return out
260
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
261
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
262
+ if type is not None:
263
+ out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True)
264
+ return out
265
+ else:
266
+ raise
267
+
268
+
269
+ class OptimizedTypedSequence(TypedSequence):
270
+ def __init__(
271
+ self,
272
+ data,
273
+ type: Optional[FeatureType] = None,
274
+ try_type: Optional[FeatureType] = None,
275
+ col: Optional[str] = None,
276
+ optimized_int_type: Optional[FeatureType] = None,
277
+ ):
278
+ optimized_int_type_by_col = {
279
+ "attention_mask": Value("int8"), # binary tensor
280
+ "special_tokens_mask": Value("int8"),
281
+ "input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
282
+ "token_type_ids": Value(
283
+ "int8"
284
+ ), # binary mask; some (XLNetModel) use an additional token represented by a 2
285
+ }
286
+ if type is None and try_type is None:
287
+ optimized_int_type = optimized_int_type_by_col.get(col, None)
288
+ super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
289
+
290
+
291
+ class ArrowWriter:
292
+ """Shuffles and writes Examples to Arrow files."""
293
+
294
+ _WRITER_CLASS = pa.RecordBatchStreamWriter
295
+
296
+ def __init__(
297
+ self,
298
+ schema: Optional[pa.Schema] = None,
299
+ features: Optional[Features] = None,
300
+ path: Optional[str] = None,
301
+ stream: Optional[pa.NativeFile] = None,
302
+ fingerprint: Optional[str] = None,
303
+ writer_batch_size: Optional[int] = None,
304
+ hash_salt: Optional[str] = None,
305
+ check_duplicates: Optional[bool] = False,
306
+ disable_nullable: bool = False,
307
+ update_features: bool = False,
308
+ with_metadata: bool = True,
309
+ unit: str = "examples",
310
+ embed_local_files: bool = False,
311
+ storage_options: Optional[dict] = None,
312
+ ):
313
+ if path is None and stream is None:
314
+ raise ValueError("At least one of path and stream must be provided.")
315
+ if features is not None:
316
+ self._features = features
317
+ self._schema = None
318
+ elif schema is not None:
319
+ self._schema: pa.Schema = schema
320
+ self._features = Features.from_arrow_schema(self._schema)
321
+ else:
322
+ self._features = None
323
+ self._schema = None
324
+
325
+ if hash_salt is not None:
326
+ # Create KeyHasher instance using split name as hash salt
327
+ self._hasher = KeyHasher(hash_salt)
328
+ else:
329
+ self._hasher = KeyHasher("")
330
+
331
+ self._check_duplicates = check_duplicates
332
+ self._disable_nullable = disable_nullable
333
+
334
+ if stream is None:
335
+ fs, path = url_to_fs(path, **(storage_options or {}))
336
+ self._fs: fsspec.AbstractFileSystem = fs
337
+ self._path = path if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(path)
338
+ self.stream = self._fs.open(path, "wb")
339
+ self._closable_stream = True
340
+ else:
341
+ self._fs = None
342
+ self._path = None
343
+ self.stream = stream
344
+ self._closable_stream = False
345
+
346
+ self.fingerprint = fingerprint
347
+ self.disable_nullable = disable_nullable
348
+ self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
349
+ self.update_features = update_features
350
+ self.with_metadata = with_metadata
351
+ self.unit = unit
352
+ self.embed_local_files = embed_local_files
353
+
354
+ self._num_examples = 0
355
+ self._num_bytes = 0
356
+ self.current_examples: List[Tuple[Dict[str, Any], str]] = []
357
+ self.current_rows: List[pa.Table] = []
358
+ self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
359
+ self.hkey_record = []
360
+
361
+ def __len__(self):
362
+ """Return the number of writed and staged examples"""
363
+ return self._num_examples + len(self.current_examples) + len(self.current_rows)
364
+
365
+ def __enter__(self):
366
+ return self
367
+
368
+ def __exit__(self, exc_type, exc_val, exc_tb):
369
+ self.close()
370
+
371
+ def close(self):
372
+ # Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
373
+ if self.pa_writer: # it might be None
374
+ try:
375
+ self.pa_writer.close()
376
+ except Exception: # pyarrow.lib.ArrowInvalid, OSError
377
+ pass
378
+ if self._closable_stream and not self.stream.closed:
379
+ self.stream.close() # This also closes self.pa_writer if it is opened
380
+
381
+ def _build_writer(self, inferred_schema: pa.Schema):
382
+ schema = self.schema
383
+ inferred_features = Features.from_arrow_schema(inferred_schema)
384
+ if self._features is not None:
385
+ if self.update_features: # keep original features if they match, or update them
386
+ fields = {field.name: field for field in self._features.type}
387
+ for inferred_field in inferred_features.type:
388
+ name = inferred_field.name
389
+ if name in fields:
390
+ if inferred_field == fields[name]:
391
+ inferred_features[name] = self._features[name]
392
+ self._features = inferred_features
393
+ schema: pa.Schema = inferred_schema
394
+ else:
395
+ self._features = inferred_features
396
+ schema: pa.Schema = inferred_features.arrow_schema
397
+ if self.disable_nullable:
398
+ schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
399
+ if self.with_metadata:
400
+ schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
401
+ else:
402
+ schema = schema.with_metadata({})
403
+ self._schema = schema
404
+ self.pa_writer = self._WRITER_CLASS(self.stream, schema)
405
+
406
+ @property
407
+ def schema(self):
408
+ _schema = (
409
+ self._schema
410
+ if self._schema is not None
411
+ else (pa.schema(self._features.type) if self._features is not None else None)
412
+ )
413
+ if self._disable_nullable and _schema is not None:
414
+ _schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
415
+ return _schema if _schema is not None else []
416
+
417
+ @staticmethod
418
+ def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
419
+ info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
420
+ info_as_dict = asdict(info)
421
+ metadata = {}
422
+ metadata["info"] = {key: info_as_dict[key] for key in info_keys}
423
+ if fingerprint is not None:
424
+ metadata["fingerprint"] = fingerprint
425
+ return {"huggingface": json.dumps(metadata)}
426
+
427
+ def write_examples_on_file(self):
428
+ """Write stored examples from the write-pool of examples. It makes a table out of the examples and write it."""
429
+ if not self.current_examples:
430
+ return
431
+ # preserve the order of the columns
432
+ if self.schema:
433
+ schema_cols = set(self.schema.names)
434
+ examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
435
+ common_cols = [col for col in self.schema.names if col in examples_cols]
436
+ extra_cols = [col for col in examples_cols if col not in schema_cols]
437
+ cols = common_cols + extra_cols
438
+ else:
439
+ cols = list(self.current_examples[0][0])
440
+ batch_examples = {}
441
+ for col in cols:
442
+ # We use row[0][col] since current_examples contains (example, key) tuples.
443
+ # Moreover, examples could be Arrow arrays of 1 element.
444
+ # This can happen in `.map()` when we want to re-write the same Arrow data
445
+ if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
446
+ arrays = [row[0][col] for row in self.current_examples]
447
+ arrays = [
448
+ chunk
449
+ for array in arrays
450
+ for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
451
+ ]
452
+ batch_examples[col] = pa.concat_arrays(arrays)
453
+ else:
454
+ batch_examples[col] = [
455
+ row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
456
+ for row in self.current_examples
457
+ ]
458
+ self.write_batch(batch_examples=batch_examples)
459
+ self.current_examples = []
460
+
461
+ def write_rows_on_file(self):
462
+ """Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
463
+ if not self.current_rows:
464
+ return
465
+ table = pa.concat_tables(self.current_rows)
466
+ self.write_table(table)
467
+ self.current_rows = []
468
+
469
+ def write(
470
+ self,
471
+ example: Dict[str, Any],
472
+ key: Optional[Union[str, int, bytes]] = None,
473
+ writer_batch_size: Optional[int] = None,
474
+ ):
475
+ """Add a given (Example,Key) pair to the write-pool of examples which is written to file.
476
+
477
+ Args:
478
+ example: the Example to add.
479
+ key: Optional, a unique identifier (str, int or bytes) associated with each example
480
+ """
481
+ # Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
482
+ if self._check_duplicates:
483
+ # Create unique hash from key and store as (key, example) pairs
484
+ hash = self._hasher.hash(key)
485
+ self.current_examples.append((example, hash))
486
+ # Maintain record of keys and their respective hashes for checking duplicates
487
+ self.hkey_record.append((hash, key))
488
+ else:
489
+ # Store example as a tuple so as to keep the structure of `self.current_examples` uniform
490
+ self.current_examples.append((example, ""))
491
+
492
+ if writer_batch_size is None:
493
+ writer_batch_size = self.writer_batch_size
494
+ if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
495
+ if self._check_duplicates:
496
+ self.check_duplicate_keys()
497
+ # Re-initializing to empty list for next batch
498
+ self.hkey_record = []
499
+
500
+ self.write_examples_on_file()
501
+
502
+ def check_duplicate_keys(self):
503
+ """Raises error if duplicates found in a batch"""
504
+ tmp_record = set()
505
+ for hash, key in self.hkey_record:
506
+ if hash in tmp_record:
507
+ duplicate_key_indices = [
508
+ str(self._num_examples + index)
509
+ for index, (duplicate_hash, _) in enumerate(self.hkey_record)
510
+ if duplicate_hash == hash
511
+ ]
512
+
513
+ raise DuplicatedKeysError(key, duplicate_key_indices)
514
+ else:
515
+ tmp_record.add(hash)
516
+
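+
The duplicate check above only looks within the current write batch: each key is hashed, hashes seen so far in the batch are kept in a set, and the first repeat raises. A standalone sketch of that logic, with a plain ValueError standing in for DuplicatedKeysError and md5 standing in for the writer's hasher:

```python
from hashlib import md5


def check_duplicate_keys(hkey_record):
    # hkey_record: list of (hash, key) pairs collected for the current batch
    seen = set()
    for key_hash, key in hkey_record:
        if key_hash in seen:
            raise ValueError(f"Found duplicate key in the current batch: {key!r}")
        seen.add(key_hash)


def hash_key(key):
    return md5(str(key).encode("utf-8")).hexdigest()


records = [(hash_key(k), k) for k in ["id-0", "id-1", "id-0"]]
try:
    check_duplicate_keys(records)
except ValueError as err:
    print(err)  # Found duplicate key in the current batch: 'id-0'
```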
517
+ def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
518
+ """Add a given single-row Table to the write-pool of rows which is written to file.
519
+
520
+ Args:
521
+ row: the row to add.
522
+ """
523
+ if len(row) != 1:
524
+ raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
525
+ self.current_rows.append(row)
526
+ if writer_batch_size is None:
527
+ writer_batch_size = self.writer_batch_size
528
+ if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
529
+ self.write_rows_on_file()
530
+
531
+ def write_batch(
532
+ self,
533
+ batch_examples: Dict[str, List],
534
+ writer_batch_size: Optional[int] = None,
535
+ ):
536
+ """Write a batch of Example to file.
537
+ Ignores the batch if it appears to be empty,
538
+ preventing a potential schema update of unknown types.
539
+
540
+ Args:
541
+ batch_examples: the batch of examples to add.
542
+ """
543
+ if batch_examples and len(next(iter(batch_examples.values()))) == 0:
544
+ return
545
+ features = None if self.pa_writer is None and self.update_features else self._features
546
+ try_features = self._features if self.pa_writer is None and self.update_features else None
547
+ arrays = []
548
+ inferred_features = Features()
549
+ # preserve the order of the columns
550
+ if self.schema:
551
+ schema_cols = set(self.schema.names)
552
+ batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
553
+ common_cols = [col for col in self.schema.names if col in batch_cols]
554
+ extra_cols = [col for col in batch_cols if col not in schema_cols]
555
+ cols = common_cols + extra_cols
556
+ else:
557
+ cols = list(batch_examples)
558
+ for col in cols:
559
+ col_values = batch_examples[col]
560
+ col_type = features[col] if features else None
561
+ if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
562
+ array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
563
+ arrays.append(array)
564
+ inferred_features[col] = generate_from_arrow_type(col_values.type)
565
+ else:
566
+ col_try_type = try_features[col] if try_features is not None and col in try_features else None
567
+ typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
568
+ arrays.append(pa.array(typed_sequence))
569
+ inferred_features[col] = typed_sequence.get_inferred_type()
570
+ schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
571
+ pa_table = pa.Table.from_arrays(arrays, schema=schema)
572
+ self.write_table(pa_table, writer_batch_size)
573
+
574
+ def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
575
+ """Write a Table to file.
576
+
577
+ Args:
578
+ pa_table: the Table to add.
579
+ """
580
+ if writer_batch_size is None:
581
+ writer_batch_size = self.writer_batch_size
582
+ if self.pa_writer is None:
583
+ self._build_writer(inferred_schema=pa_table.schema)
584
+ pa_table = pa_table.combine_chunks()
585
+ pa_table = table_cast(pa_table, self._schema)
586
+ if self.embed_local_files:
587
+ pa_table = embed_table_storage(pa_table)
588
+ self._num_bytes += pa_table.nbytes
589
+ self._num_examples += pa_table.num_rows
590
+ self.pa_writer.write_table(pa_table, writer_batch_size)
591
+
592
+ def finalize(self, close_stream=True):
593
+ self.write_rows_on_file()
594
+ # In case len(current_examples) < writer_batch_size, but the user calls finalize()
595
+ if self._check_duplicates:
596
+ self.check_duplicate_keys()
597
+ # Re-initializing to empty list for next batch
598
+ self.hkey_record = []
599
+ self.write_examples_on_file()
600
+ # If schema is known, infer features even if no examples were written
601
+ if self.pa_writer is None and self.schema:
602
+ self._build_writer(self.schema)
603
+ if self.pa_writer is not None:
604
+ self.pa_writer.close()
605
+ self.pa_writer = None
606
+ if close_stream:
607
+ self.stream.close()
608
+ else:
609
+ if close_stream:
610
+ self.stream.close()
611
+ raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
612
+ logger.debug(
613
+ f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
614
+ )
615
+ return self._num_examples, self._num_bytes
616
+
617
+
618
+ class ParquetWriter(ArrowWriter):
619
+ _WRITER_CLASS = pq.ParquetWriter
620
+
621
+
622
+ class BeamWriter:
623
+ """
624
+ Shuffles and writes Examples to Arrow files.
625
+ The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
626
+ """
627
+
628
+ def __init__(
629
+ self,
630
+ features: Optional[Features] = None,
631
+ schema: Optional[pa.Schema] = None,
632
+ path: Optional[str] = None,
633
+ namespace: Optional[str] = None,
634
+ cache_dir: Optional[str] = None,
635
+ ):
636
+ if features is None and schema is None:
637
+ raise ValueError("At least one of features and schema must be provided.")
638
+ if path is None:
639
+ raise ValueError("Path must be provided.")
640
+
641
+ if features is not None:
642
+ self._features: Features = features
643
+ self._schema: pa.Schema = features.arrow_schema
644
+ else:
645
+ self._schema: pa.Schema = schema
646
+ self._features: Features = Features.from_arrow_schema(schema)
647
+
648
+ self._path = path
649
+ self._parquet_path = os.path.splitext(path)[0] # remove extension
650
+ self._namespace = namespace or "default"
651
+ self._num_examples = None
652
+ self._cache_dir = cache_dir or config.HF_DATASETS_CACHE
653
+
654
+ def write_from_pcollection(self, pcoll_examples):
655
+ """Add the final steps of the beam pipeline: write to parquet files."""
656
+ import apache_beam as beam
657
+
658
+ def inc_num_examples(example):
659
+ beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
660
+
661
+ # count examples
662
+ _ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
663
+
664
+ # save dataset
665
+ return (
666
+ pcoll_examples
667
+ | "Get values" >> beam.Values()
668
+ | "Save to parquet"
669
+ >> beam.io.parquetio.WriteToParquet(
670
+ self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet"
671
+ )
672
+ )
673
+
674
+ def finalize(self, metrics_query_result: dict):
675
+ """
676
+ Run after the pipeline has finished.
677
+ It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics.
678
+
679
+ Args:
680
+ metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
681
+ that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
682
+ """
683
+
684
+ # Beam FileSystems require the system's path separator in the older versions
685
+ fs, parquet_path = url_to_fs(self._parquet_path)
686
+ parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path)
687
+
688
+ shards = fs.glob(parquet_path + "*.parquet")
689
+ num_bytes = sum(fs.sizes(shards))
690
+ shard_lengths = get_parquet_lengths(shards)
691
+
692
+ # Convert to arrow
693
+ if self._path.endswith(".arrow"):
694
+ logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
695
+ try: # stream conversion
696
+ num_bytes = 0
697
+ for shard in hf_tqdm(shards, unit="shards"):
698
+ with fs.open(shard, "rb") as source:
699
+ with fs.open(shard.replace(".parquet", ".arrow"), "wb") as destination:
700
+ shard_num_bytes, _ = parquet_to_arrow(source, destination)
701
+ num_bytes += shard_num_bytes
702
+ except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
703
+ if e.errno != errno.EPIPE: # not a broken pipe
704
+ raise
705
+ logger.warning(
706
+ "Broken Pipe during stream conversion from parquet to arrow. Using local convert instead"
707
+ )
708
+ local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
709
+ os.makedirs(local_convert_dir, exist_ok=True)
710
+ num_bytes = 0
711
+ for shard in hf_tqdm(shards, unit="shards"):
712
+ local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
713
+ fs.download(shard, local_parquet_path)
714
+ local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
715
+ shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
716
+ num_bytes += shard_num_bytes
717
+ remote_arrow_path = shard.replace(".parquet", ".arrow")
718
+ fs.upload(local_arrow_path, remote_arrow_path)
719
+
720
+ # Save metrics
721
+ counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
722
+ self._num_examples = counters_dict["num_examples"]
723
+ self._num_bytes = num_bytes
724
+ self._shard_lengths = shard_lengths
725
+ return self._num_examples, self._num_bytes
726
+
727
+
728
+ def get_parquet_lengths(sources) -> List[int]:
729
+ shard_lengths = []
730
+ for source in hf_tqdm(sources, unit="parquet files"):
731
+ parquet_file = pa.parquet.ParquetFile(source)
732
+ shard_lengths.append(parquet_file.metadata.num_rows)
733
+ return shard_lengths
734
+
735
+
736
+ def parquet_to_arrow(source, destination) -> List[int]:
737
+ """Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
738
+ stream = None if isinstance(destination, str) else destination
739
+ parquet_file = pa.parquet.ParquetFile(source)
740
+ # Beam can create empty Parquet files, so we need to pass the source Parquet file's schema
741
+ with ArrowWriter(schema=parquet_file.schema_arrow, path=destination, stream=stream) as writer:
742
+ for record_batch in parquet_file.iter_batches():
743
+ pa_table = pa.Table.from_batches([record_batch])
744
+ writer.write_table(pa_table)
745
+ num_bytes, num_examples = writer.finalize()
746
+ return num_bytes, num_examples
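A minimal usage sketch of the writer defined in this file, assuming the module is importable as datasets.arrow_writer and that the constructor accepts features= and path= keyword arguments as in this version:

```python
from datasets import Features, Value
from datasets.arrow_writer import ArrowWriter

features = Features({"text": Value("string"), "label": Value("int64")})
with ArrowWriter(features=features, path="my_split.arrow") as writer:
    writer.write({"text": "hello", "label": 0})
    writer.write({"text": "world", "label": 1})
    num_examples, num_bytes = writer.finalize()  # flushes pending examples and closes the stream
print(num_examples, num_bytes)  # 2, <size in bytes>
```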
venv/lib/python3.10/site-packages/datasets/builder.bak.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/datasets/builder.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/datasets/config.py ADDED
@@ -0,0 +1,272 @@
1
+ import importlib
2
+ import importlib.metadata
3
+ import logging
4
+ import os
5
+ import platform
6
+ from pathlib import Path
7
+ from typing import Optional
8
+
9
+ from packaging import version
10
+
11
+
12
+ logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
13
+
14
+ # Datasets
15
+ S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
16
+ CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
17
+ REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
18
+
19
+ # Metrics
20
+ S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
21
+ CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
22
+ REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
23
+
24
+ # Hub
25
+ HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
26
+ HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
27
+ HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
28
+ HUB_DEFAULT_VERSION = "main"
29
+
30
+ PY_VERSION = version.parse(platform.python_version())
31
+
32
+ # General environment variables accepted values for booleans
33
+ ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
34
+ ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
35
+ ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
36
+ ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
37
+
38
+
39
+ # Imports
40
+ DILL_VERSION = version.parse(importlib.metadata.version("dill"))
41
+ FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
42
+ PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
43
+ PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
44
+ HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
45
+
46
+ USE_TF = os.environ.get("USE_TF", "AUTO").upper()
47
+ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
48
+ USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
49
+
50
+ TORCH_VERSION = "N/A"
51
+ TORCH_AVAILABLE = False
52
+
53
+ if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
54
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
55
+ if TORCH_AVAILABLE:
56
+ try:
57
+ TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
58
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
59
+ except importlib.metadata.PackageNotFoundError:
60
+ pass
61
+ else:
62
+ logger.info("Disabling PyTorch because USE_TF is set")
63
+
64
+ POLARS_VERSION = "N/A"
65
+ POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None
66
+
67
+ if POLARS_AVAILABLE:
68
+ try:
69
+ POLARS_VERSION = version.parse(importlib.metadata.version("polars"))
70
+ logger.info(f"Polars version {POLARS_VERSION} available.")
71
+ except importlib.metadata.PackageNotFoundError:
72
+ pass
73
+
74
+ TF_VERSION = "N/A"
75
+ TF_AVAILABLE = False
76
+
77
+ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
78
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
79
+ if TF_AVAILABLE:
80
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
81
+ for package in [
82
+ "tensorflow",
83
+ "tensorflow-cpu",
84
+ "tensorflow-gpu",
85
+ "tf-nightly",
86
+ "tf-nightly-cpu",
87
+ "tf-nightly-gpu",
88
+ "intel-tensorflow",
89
+ "tensorflow-rocm",
90
+ "tensorflow-macos",
91
+ ]:
92
+ try:
93
+ TF_VERSION = version.parse(importlib.metadata.version(package))
94
+ except importlib.metadata.PackageNotFoundError:
95
+ continue
96
+ else:
97
+ break
98
+ else:
99
+ TF_AVAILABLE = False
100
+ if TF_AVAILABLE:
101
+ if TF_VERSION.major < 2:
102
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
103
+ TF_AVAILABLE = False
104
+ else:
105
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
106
+ else:
107
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
108
+
109
+
110
+ JAX_VERSION = "N/A"
111
+ JAX_AVAILABLE = False
112
+
113
+ if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
114
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
115
+ if JAX_AVAILABLE:
116
+ try:
117
+ JAX_VERSION = version.parse(importlib.metadata.version("jax"))
118
+ logger.info(f"JAX version {JAX_VERSION} available.")
119
+ except importlib.metadata.PackageNotFoundError:
120
+ pass
121
+ else:
122
+ logger.info("Disabling JAX because USE_JAX is set to False")
123
+
124
+
125
+ USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
126
+ BEAM_VERSION = "N/A"
127
+ BEAM_AVAILABLE = False
128
+ if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
129
+ try:
130
+ BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
131
+ BEAM_AVAILABLE = True
132
+ logger.info(f"Apache Beam version {BEAM_VERSION} available.")
133
+ except importlib.metadata.PackageNotFoundError:
134
+ pass
135
+ else:
136
+ logger.info("Disabling Apache Beam because USE_BEAM is set to False")
137
+
138
+
139
+ # Optional tools for data loading
140
+ SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
141
+
142
+ # Optional tools for feature decoding
143
+ PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
144
+ IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
145
+ importlib.import_module("soundfile").__libsndfile_version__
146
+ ) >= version.parse("1.0.31")
147
+ IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
148
+ importlib.import_module("soundfile").__libsndfile_version__
149
+ ) >= version.parse("1.1.0")
150
+
151
+ # Optional compression tools
152
+ RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
153
+ ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
154
+ LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
155
+ PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
156
+
157
+ # Cache location
158
+ DEFAULT_XDG_CACHE_HOME = "~/.cache"
159
+ XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
160
+ DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
161
+ HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
162
+
163
+ DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
164
+ HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
165
+
166
+ DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
167
+ HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
168
+
169
+ DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
170
+ HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
171
+
172
+ DOWNLOADED_DATASETS_DIR = "downloads"
173
+ DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
174
+ DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
175
+
176
+ EXTRACTED_DATASETS_DIR = "extracted"
177
+ DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
178
+ EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
179
+
180
+ # Download count for the website
181
+ HF_UPDATE_DOWNLOAD_COUNTS = (
182
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
183
+ )
184
+
185
+ # For downloads and to check remote files metadata
186
+ HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16
187
+
188
+ # Remote dataset scripts support
189
+ __HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
190
+ HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
191
+ True
192
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
193
+ else False
194
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
195
+ else None
196
+ )
197
+ TIME_OUT_REMOTE_CODE = 15
198
+
199
+ # Dataset viewer API
200
+ USE_PARQUET_EXPORT = True
201
+
202
+ # Batch size constants. For more info, see:
203
+ # https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations)
204
+ DEFAULT_MAX_BATCH_SIZE = 1000
205
+
206
+ # Size of the preloaded record batch in `Dataset.__iter__`
207
+ ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
208
+
209
+ # Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
210
+ MAX_SHARD_SIZE = "500MB"
211
+
212
+ # Parquet configuration
213
+ PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
214
+ PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
215
+ PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
216
+
217
+ # Offline mode
218
+ HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
219
+
220
+ # Here, `True` will disable progress bars globally without possibility of enabling it
221
+ # programmatically. `False` will enable them without possibility of disabling them.
222
+ # If environment variable is not set (None), then the user is free to enable/disable
223
+ # them programmatically.
224
+ # TL;DR: env variable has priority over code
225
+ __HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
226
+ HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
227
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
228
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
229
+ else None
230
+ )
231
+
232
+ # In-memory
233
+ DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
234
+ IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
235
+
236
+ # File names
237
+ DATASET_ARROW_FILENAME = "dataset.arrow"
238
+ DATASET_INDICES_FILENAME = "indices.arrow"
239
+ DATASET_STATE_JSON_FILENAME = "state.json"
240
+ DATASET_INFO_FILENAME = "dataset_info.json"
241
+ DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
242
+ LICENSE_FILENAME = "LICENSE"
243
+ METRIC_INFO_FILENAME = "metric_info.json"
244
+ DATASETDICT_JSON_FILENAME = "dataset_dict.json"
245
+ METADATA_CONFIGS_FIELD = "configs"
246
+ REPOCARD_FILENAME = "README.md"
247
+ REPOYAML_FILENAME = ".huggingface.yaml"
248
+
249
+ MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
250
+
251
+ MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
252
+
253
+ # Temporary cache directory prefix
254
+ TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
255
+
256
+ # Streaming
257
+ STREAMING_READ_MAX_RETRIES = 20
258
+ STREAMING_READ_RETRY_INTERVAL = 5
259
+
260
+ # Datasets without script
261
+ DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
262
+ GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
263
+ ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
264
+
265
+ # Progress bars
266
+ PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
267
+
268
+ # Maximum number of uploaded files per commit
269
+ UPLOADS_MAX_NUMBER_PER_COMMIT = 50
270
+
271
+ # Backward compatibility
272
+ MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
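The boolean environment variables above all follow the same convention: the value is upper-cased and compared against ENV_VARS_TRUE_VALUES (plus "AUTO" for the *_AND_AUTO variants). A small illustrative sketch, where env_flag is a hypothetical helper and not part of this module:

```python
import os

ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}


def env_flag(name: str, default: str = "AUTO") -> bool:
    # Hypothetical helper mirroring the pattern used above (without the AUTO handling)
    return os.environ.get(name, default).upper() in ENV_VARS_TRUE_VALUES


os.environ["HF_DATASETS_OFFLINE"] = "yes"
print(env_flag("HF_DATASETS_OFFLINE"))  # True, since "yes".upper() == "YES"
```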
venv/lib/python3.10/site-packages/datasets/data_files.py ADDED
@@ -0,0 +1,821 @@
1
+ import os
2
+ import re
3
+ from functools import partial
4
+ from glob import has_magic
5
+ from pathlib import Path, PurePath
6
+ from typing import Callable, Dict, List, Optional, Set, Tuple, Union
7
+
8
+ import huggingface_hub
9
+ from fsspec.core import url_to_fs
10
+ from fsspec.implementations.http import HTTPFileSystem
11
+ from huggingface_hub import HfFileSystem
12
+ from packaging import version
13
+ from tqdm.contrib.concurrent import thread_map
14
+
15
+ from . import config
16
+ from .download import DownloadConfig
17
+ from .naming import _split_re
18
+ from .splits import Split
19
+ from .utils import logging
20
+ from .utils import tqdm as hf_tqdm
21
+ from .utils.file_utils import _prepare_path_and_storage_options, is_local_path, is_relative_path, xbasename, xjoin
22
+ from .utils.py_utils import glob_pattern_to_regex, string_to_dict
23
+
24
+
25
+ SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ class Url(str):
32
+ pass
33
+
34
+
35
+ class EmptyDatasetError(FileNotFoundError):
36
+ pass
37
+
38
+
39
+ SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
40
+
41
+ SPLIT_KEYWORDS = {
42
+ Split.TRAIN: ["train", "training"],
43
+ Split.VALIDATION: ["validation", "valid", "dev", "val"],
44
+ Split.TEST: ["test", "testing", "eval", "evaluation"],
45
+ }
46
+ NON_WORDS_CHARS = "-._ 0-9"
47
+ if config.FSSPEC_VERSION < version.parse("2023.9.0"):
48
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
49
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
50
+ "{keyword}/**",
51
+ "{keyword}[{sep}]*/**",
52
+ "**[{sep}/]{keyword}/**",
53
+ "**[{sep}/]{keyword}[{sep}]*/**",
54
+ ]
55
+ elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
56
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/*[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
57
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
58
+ "{keyword}/**/*",
59
+ "{keyword}[{sep}]*/**/*",
60
+ "**/*[{sep}/]{keyword}/**/*",
61
+ "**/*[{sep}/]{keyword}[{sep}]*/**/*",
62
+ ]
63
+ else:
64
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/{keyword}[{sep}]*", "**/*[{sep}]{keyword}[{sep}]*"]
65
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
66
+ "**/{keyword}/**",
67
+ "**/{keyword}[{sep}]*/**",
68
+ "**/*[{sep}]{keyword}/**",
69
+ "**/*[{sep}]{keyword}[{sep}]*/**",
70
+ ]
71
+
72
+ DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
73
+ DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {
74
+ split: [
75
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
76
+ for keyword in SPLIT_KEYWORDS[split]
77
+ for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS
78
+ ]
79
+ for split in DEFAULT_SPLITS
80
+ }
81
+ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {
82
+ split: [
83
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
84
+ for keyword in SPLIT_KEYWORDS[split]
85
+ for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS
86
+ ]
87
+ for split in DEFAULT_SPLITS
88
+ }
89
+
90
+
91
+ DEFAULT_PATTERNS_ALL = {
92
+ Split.TRAIN: ["**"],
93
+ }
94
+
95
+ ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
96
+ ALL_DEFAULT_PATTERNS = [
97
+ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME,
98
+ DEFAULT_PATTERNS_SPLIT_IN_FILENAME,
99
+ DEFAULT_PATTERNS_ALL,
100
+ ]
101
+ if config.FSSPEC_VERSION < version.parse("2023.9.0"):
102
+ METADATA_PATTERNS = [
103
+ "metadata.csv",
104
+ "**/metadata.csv",
105
+ "metadata.jsonl",
106
+ "**/metadata.jsonl",
107
+ ] # metadata file for ImageFolder and AudioFolder
108
+ else:
109
+ METADATA_PATTERNS = [
110
+ "**/metadata.csv",
111
+ "**/metadata.jsonl",
112
+ ] # metadata file for ImageFolder and AudioFolder
113
+ WILDCARD_CHARACTERS = "*[]"
114
+ FILES_TO_IGNORE = [
115
+ "README.md",
116
+ "config.json",
117
+ "dataset_info.json",
118
+ "dataset_infos.json",
119
+ "dummy_data.zip",
120
+ "dataset_dict.json",
121
+ ]
122
+
123
+
124
+ def contains_wildcards(pattern: str) -> bool:
125
+ return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
126
+
127
+
128
+ def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
129
+ """
130
+ Take the data_files patterns from the user, and format them into a dictionary.
131
+ Each key is the name of the split, and each value is a list of data files patterns (paths or urls).
132
+ The default split is "train".
133
+
134
+ Returns:
135
+ patterns: dictionary of split_name -> list of patterns
136
+ """
137
+ if isinstance(patterns, dict):
138
+ return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
139
+ elif isinstance(patterns, str):
140
+ return {SANITIZED_DEFAULT_SPLIT: [patterns]}
141
+ elif isinstance(patterns, list):
142
+ if any(isinstance(pattern, dict) for pattern in patterns):
143
+ for pattern in patterns:
144
+ if not (
145
+ isinstance(pattern, dict)
146
+ and len(pattern) == 2
147
+ and "split" in pattern
148
+ and isinstance(pattern.get("path"), (str, list))
149
+ ):
150
+ raise ValueError(
151
+ f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
152
+ )
153
+ splits = [pattern["split"] for pattern in patterns]
154
+ if len(set(splits)) != len(splits):
155
+ raise ValueError(f"Some splits are duplicated in data_files: {splits}")
156
+ return {
157
+ str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
158
+ for pattern in patterns
159
+ }
160
+ else:
161
+ return {SANITIZED_DEFAULT_SPLIT: patterns}
162
+ else:
163
+ return sanitize_patterns(list(patterns))
164
+
165
+
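+
A quick illustration of the normalization performed by sanitize_patterns for its three accepted input shapes, assuming the module is importable as datasets.data_files:

```python
from datasets.data_files import sanitize_patterns

print(sanitize_patterns("data/*.csv"))
# {'train': ['data/*.csv']}
print(sanitize_patterns({"train": "train.csv", "test": ["test_0.csv", "test_1.csv"]}))
# {'train': ['train.csv'], 'test': ['test_0.csv', 'test_1.csv']}
print(sanitize_patterns([{"split": "train", "path": "data/train-*"}, {"split": "test", "path": "data/test-*"}]))
# {'train': ['data/train-*'], 'test': ['data/test-*']}
```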
166
+ def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
167
+ """
168
+ When a path matches a pattern, we additionally check if it's inside a special directory
169
+ we ignore by default (if it starts with a double underscore).
170
+
171
+ Users can still explicitly request a filepath inside such a directory if "__pycache__" is
172
+ mentioned explicitly in the requested pattern.
173
+
174
+ Some examples:
175
+
176
+ base directory:
177
+
178
+ ./
179
+ └── __pycache__
180
+ └── b.txt
181
+
182
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
183
+ True
184
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
185
+ True
186
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
187
+ False
188
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
189
+ False
190
+ """
191
+ # We just need to check that every special directory in the path is present explicitly in the pattern.
192
+ # Since we assume that the path matches the pattern, it's equivalent to checking that both
193
+ # the parent path and the parent pattern have the same number of special directories.
194
+ data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
195
+ data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
196
+ return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
197
+
198
+
199
+ def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
200
+ """
201
+ When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
202
+ a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
203
+
204
+ Users can still explicitly request a filepath that is hidden or is inside a hidden directory
205
+ if the hidden part is mentioned explicitly in the requested pattern.
206
+
207
+ Some examples:
208
+
209
+ base directory:
210
+
211
+ ./
212
+ └── .hidden_file.txt
213
+
214
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
215
+ True
216
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
217
+ False
218
+
219
+ base directory:
220
+
221
+ ./
222
+ └── .hidden_dir
223
+ └── a.txt
224
+
225
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
226
+ True
227
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
228
+ False
229
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
230
+ False
231
+
232
+ base directory:
233
+
234
+ ./
235
+ └── .hidden_dir
236
+ └── .hidden_file.txt
237
+
238
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
239
+ True
240
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
241
+ True
242
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
243
+ False
244
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
245
+ True
246
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
247
+ False
248
+ """
249
+ # We just need to check that every hidden part of the path is present explicitly in the pattern.
250
+ # Since we assume that the path matches the pattern, it's equivalent to checking that both
251
+ # the path and the pattern have the same number of hidden parts.
252
+ hidden_directories_in_path = [
253
+ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
254
+ ]
255
+ hidden_directories_in_pattern = [
256
+ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
257
+ ]
258
+ return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
259
+
260
+
261
+ def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
262
+ """
263
+ Get the default pattern from a directory or repository by testing all the supported patterns.
264
+ The first pattern to return a non-empty list of data files is returned.
265
+
266
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
267
+ """
268
+ # first check the split patterns like data/{split}-00000-of-00001.parquet
269
+ for split_pattern in ALL_SPLIT_PATTERNS:
270
+ pattern = split_pattern.replace("{split}", "*")
271
+ try:
272
+ data_files = pattern_resolver(pattern)
273
+ except FileNotFoundError:
274
+ continue
275
+ if len(data_files) > 0:
276
+ splits: Set[str] = {
277
+ string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
278
+ for p in data_files
279
+ }
280
+ if any(not re.match(_split_re, split) for split in splits):
281
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{splits}'.")
282
+ sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
283
+ splits - set(DEFAULT_SPLITS)
284
+ )
285
+ return {split: [split_pattern.format(split=split)] for split in sorted_splits}
286
+ # then check the default patterns based on train/valid/test splits
287
+ for patterns_dict in ALL_DEFAULT_PATTERNS:
288
+ non_empty_splits = []
289
+ for split, patterns in patterns_dict.items():
290
+ for pattern in patterns:
291
+ try:
292
+ data_files = pattern_resolver(pattern)
293
+ except FileNotFoundError:
294
+ continue
295
+ if len(data_files) > 0:
296
+ non_empty_splits.append(split)
297
+ break
298
+ if non_empty_splits:
299
+ return {split: patterns_dict[split] for split in non_empty_splits}
300
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
301
+
302
+
303
+ def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
304
+ """
305
+ Get the supported metadata patterns from a directory or repository.
306
+ """
307
+ non_empty_patterns = []
308
+ for pattern in METADATA_PATTERNS:
309
+ try:
310
+ metadata_files = pattern_resolver(pattern)
311
+ if len(metadata_files) > 0:
312
+ non_empty_patterns.append(pattern)
313
+ except FileNotFoundError:
314
+ pass
315
+ if non_empty_patterns:
316
+ return non_empty_patterns
317
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
318
+
319
+
320
+ def resolve_pattern(
321
+ pattern: str,
322
+ base_path: str,
323
+ allowed_extensions: Optional[List[str]] = None,
324
+ download_config: Optional[DownloadConfig] = None,
325
+ ) -> List[str]:
326
+ """
327
+ Resolve the paths and URLs of the data files from the pattern passed by the user.
328
+
329
+ You can use patterns to resolve multiple local files. Here are a few examples:
330
+ - *.csv to match all the CSV files at the first level
331
+ - **.csv to match all the CSV files at any level
332
+ - data/* to match all the files inside "data"
333
+ - data/** to match all the files inside "data" and its subdirectories
334
+
335
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
336
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
337
+ other than a forward slash /.
338
+
339
+ More generally:
340
+ - '*' matches any character except a forward-slash (to match just the file or directory name)
341
+ - '**' matches any character including a forward-slash /
342
+
343
+ Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
344
+ The same applies to special directories that start with a double underscore like "__pycache__".
345
+ You can still include one if the pattern explicitly mentions it:
346
+ - to include a hidden file: "*/.hidden.txt" or "*/.*"
347
+ - to include a hidden directory: ".hidden/*" or ".*/*"
348
+ - to include a special directory: "__special__/*" or "__*/*"
349
+
350
+ Example::
351
+
352
+ >>> from datasets.data_files import resolve_pattern
353
+ >>> base_path = "."
354
+ >>> resolve_pattern("docs/**/*.py", base_path)
355
+ ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
356
+
357
+ Args:
358
+ pattern (str): Unix pattern or paths or URLs of the data files to resolve.
359
+ The paths can be absolute or relative to base_path.
360
+ Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
361
+ base_path (str): Base path to use when resolving relative paths.
362
+ allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
363
+ For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
364
+ Returns:
365
+ List[str]: List of paths or URLs to the local or remote files that match the patterns.
366
+ """
367
+ if is_relative_path(pattern):
368
+ pattern = xjoin(base_path, pattern)
369
+ elif is_local_path(pattern):
370
+ base_path = os.path.splitdrive(pattern)[0] + os.sep
371
+ else:
372
+ base_path = ""
373
+ pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
374
+ fs, fs_pattern = url_to_fs(pattern, **storage_options)
375
+ files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
376
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
377
+ protocol_prefix = protocol + "://" if protocol != "file" else ""
378
+ glob_kwargs = {}
379
+ if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
380
+ # 10 times faster glob with detail=True (ignores costly info like lastCommit)
381
+ glob_kwargs["expand_info"] = False
382
+ matched_paths = [
383
+ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
384
+ for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
385
+ if info["type"] == "file"
386
+ and (xbasename(filepath) not in files_to_ignore)
387
+ and not _is_inside_unrequested_special_dir(filepath, fs_pattern)
388
+ and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(filepath, fs_pattern)
389
+ ] # ignore .ipynb and __pycache__, but keep /../
390
+ if allowed_extensions is not None:
391
+ out = [
392
+ filepath
393
+ for filepath in matched_paths
394
+ if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
395
+ ]
396
+ if len(out) < len(matched_paths):
397
+ invalid_matched_files = list(set(matched_paths) - set(out))
398
+ logger.info(
399
+ f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
400
+ )
401
+ else:
402
+ out = matched_paths
403
+ if not out:
404
+ error_msg = f"Unable to find '{pattern}'"
405
+ if allowed_extensions is not None:
406
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
407
+ raise FileNotFoundError(error_msg)
408
+ return out
409
+
410
+
411
+ def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
412
+ """
413
+ Get the default pattern from a directory by testing all the supported patterns.
414
+ The first pattern to return a non-empty list of data files is returned.
415
+
416
+ Some examples of supported patterns:
417
+
418
+ Input:
419
+
420
+ my_dataset_repository/
421
+ ├── README.md
422
+ └── dataset.csv
423
+
424
+ Output:
425
+
426
+ {'train': ['**']}
427
+
428
+ Input:
429
+
430
+ my_dataset_repository/
431
+ ├── README.md
432
+ ├── train.csv
433
+ └── test.csv
434
+
435
+ my_dataset_repository/
436
+ ├── README.md
437
+ └── data/
438
+ ├── train.csv
439
+ └── test.csv
440
+
441
+ my_dataset_repository/
442
+ ├── README.md
443
+ ├── train_0.csv
444
+ ├── train_1.csv
445
+ ├── train_2.csv
446
+ ├── train_3.csv
447
+ ├── test_0.csv
448
+ └── test_1.csv
449
+
450
+ Output:
451
+
452
+ {'train': ['**/train[-._ 0-9]*', '**/*[-._ 0-9]train[-._ 0-9]*', '**/training[-._ 0-9]*', '**/*[-._ 0-9]training[-._ 0-9]*'],
453
+ 'test': ['**/test[-._ 0-9]*', '**/*[-._ 0-9]test[-._ 0-9]*', '**/testing[-._ 0-9]*', '**/*[-._ 0-9]testing[-._ 0-9]*', ...]}
454
+
455
+ Input:
456
+
457
+ my_dataset_repository/
458
+ ├── README.md
459
+ └── data/
460
+ ├── train/
461
+ │ ├── shard_0.csv
462
+ │ ├── shard_1.csv
463
+ │ ├── shard_2.csv
464
+ │ └── shard_3.csv
465
+ └── test/
466
+ ├── shard_0.csv
467
+ └── shard_1.csv
468
+
469
+ Output:
470
+
471
+ {'train': ['**/train/**', '**/train[-._ 0-9]*/**', '**/*[-._ 0-9]train/**', '**/*[-._ 0-9]train[-._ 0-9]*/**', ...],
472
+ 'test': ['**/test/**', '**/test[-._ 0-9]*/**', '**/*[-._ 0-9]test/**', '**/*[-._ 0-9]test[-._ 0-9]*/**', ...]}
473
+
474
+ Input:
475
+
476
+ my_dataset_repository/
477
+ ├── README.md
478
+ └── data/
479
+ ├── train-00000-of-00003.csv
480
+ ├── train-00001-of-00003.csv
481
+ ├── train-00002-of-00003.csv
482
+ ├── test-00000-of-00001.csv
483
+ ├── random-00000-of-00003.csv
484
+ ├── random-00001-of-00003.csv
485
+ └── random-00002-of-00003.csv
486
+
487
+ Output:
488
+
489
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
490
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
491
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
492
+
493
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
494
+ """
495
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
496
+ try:
497
+ return _get_data_files_patterns(resolver)
498
+ except FileNotFoundError:
499
+ raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
500
+
501
+
502
+ def get_metadata_patterns(
503
+ base_path: str,
504
+ download_config: Optional[DownloadConfig] = None,
505
+ ) -> List[str]:
506
+ """
507
+ Get the supported metadata patterns from a local directory.
508
+ """
509
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
510
+ try:
511
+ return _get_metadata_files_patterns(resolver)
512
+ except FileNotFoundError:
513
+ raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
514
+
515
+
516
+ def _get_single_origin_metadata(
517
+ data_file: str,
518
+ download_config: Optional[DownloadConfig] = None,
519
+ ) -> Tuple[str]:
520
+ data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
521
+ fs, *_ = url_to_fs(data_file, **storage_options)
522
+ if isinstance(fs, HfFileSystem):
523
+ resolved_path = fs.resolve_path(data_file)
524
+ return (resolved_path.repo_id, resolved_path.revision)
525
+ elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
526
+ hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
527
+ data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
528
+ resolved_path = hffs.resolve_path(data_file)
529
+ return (resolved_path.repo_id, resolved_path.revision)
530
+ info = fs.info(data_file)
531
+ # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
532
+ for key in ["ETag", "etag", "mtime"]:
533
+ if key in info:
534
+ return (str(info[key]),)
535
+ return ()
536
+
537
+
538
+ def _get_origin_metadata(
539
+ data_files: List[str],
540
+ download_config: Optional[DownloadConfig] = None,
541
+ max_workers: Optional[int] = None,
542
+ ) -> Tuple[str]:
543
+ max_workers = max_workers if max_workers is not None else config.HF_DATASETS_MULTITHREADING_MAX_WORKERS
544
+ return thread_map(
545
+ partial(_get_single_origin_metadata, download_config=download_config),
546
+ data_files,
547
+ max_workers=max_workers,
548
+ tqdm_class=hf_tqdm,
549
+ desc="Resolving data files",
550
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
551
+ disable=len(data_files) <= 16 or None,
552
+ )
553
+
554
+
555
+ class DataFilesList(List[str]):
556
+ """
557
+ List of data files (absolute local paths or URLs).
558
+ It has two construction methods given the user's data files patterns:
559
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
560
+ - ``from_local_or_remote``: resolve patterns from a local path
561
+
562
+ Moreover DataFilesList has an additional attribute ``origin_metadata``.
563
+ It can store:
564
+ - the last modified time of local files
565
+ - ETag of remote files
566
+ - commit sha of a dataset repository
567
+
568
+ Thanks to this additional attribute, it is possible to hash the list
569
+ and get a different hash if and only if at least one file changed.
570
+ This is useful for caching Dataset objects that are obtained from a list of data files.
571
+ """
572
+
573
+ def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
574
+ super().__init__(data_files)
575
+ self.origin_metadata = origin_metadata
576
+
577
+ def __add__(self, other):
578
+ return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
579
+
580
+ @classmethod
581
+ def from_hf_repo(
582
+ cls,
583
+ patterns: List[str],
584
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
585
+ base_path: Optional[str] = None,
586
+ allowed_extensions: Optional[List[str]] = None,
587
+ download_config: Optional[DownloadConfig] = None,
588
+ ) -> "DataFilesList":
589
+ base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
590
+ return cls.from_patterns(
591
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
592
+ )
593
+
594
+ @classmethod
595
+ def from_local_or_remote(
596
+ cls,
597
+ patterns: List[str],
598
+ base_path: Optional[str] = None,
599
+ allowed_extensions: Optional[List[str]] = None,
600
+ download_config: Optional[DownloadConfig] = None,
601
+ ) -> "DataFilesList":
602
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
603
+ return cls.from_patterns(
604
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
605
+ )
606
+
607
+ @classmethod
608
+ def from_patterns(
609
+ cls,
610
+ patterns: List[str],
611
+ base_path: Optional[str] = None,
612
+ allowed_extensions: Optional[List[str]] = None,
613
+ download_config: Optional[DownloadConfig] = None,
614
+ ) -> "DataFilesList":
615
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
616
+ data_files = []
617
+ for pattern in patterns:
618
+ try:
619
+ data_files.extend(
620
+ resolve_pattern(
621
+ pattern,
622
+ base_path=base_path,
623
+ allowed_extensions=allowed_extensions,
624
+ download_config=download_config,
625
+ )
626
+ )
627
+ except FileNotFoundError:
628
+ if not has_magic(pattern):
629
+ raise
630
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
631
+ return cls(data_files, origin_metadata)
632
+
633
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
634
+ pattern = "|".join("\\" + ext for ext in extensions)
635
+ pattern = re.compile(f".*({pattern})(\\..+)?$")
636
+ return DataFilesList(
637
+ [data_file for data_file in self if pattern.match(data_file)],
638
+ origin_metadata=self.origin_metadata,
639
+ )
640
+
641
+
642
+ class DataFilesDict(Dict[str, DataFilesList]):
643
+ """
644
+ Dict of split_name -> list of data files (absolute local paths or URLs).
645
+ It has two construction methods given the user's data files patterns :
646
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
647
+ - ``from_local_or_remote``: resolve patterns from a local path
648
+
649
+ Moreover each list is a DataFilesList. It is possible to hash the dictionary
650
+ and get a different hash if and only if at least one file changed.
651
+ For more info, see ``DataFilesList``.
652
+
653
+ This is useful for caching Dataset objects that are obtained from a list of data files.
654
+
655
+ Changing the order of the keys of this dictionary also doesn't change its hash.
656
+ """
657
+
658
+ @classmethod
659
+ def from_local_or_remote(
660
+ cls,
661
+ patterns: Dict[str, Union[List[str], DataFilesList]],
662
+ base_path: Optional[str] = None,
663
+ allowed_extensions: Optional[List[str]] = None,
664
+ download_config: Optional[DownloadConfig] = None,
665
+ ) -> "DataFilesDict":
666
+ out = cls()
667
+ for key, patterns_for_key in patterns.items():
668
+ out[key] = (
669
+ DataFilesList.from_local_or_remote(
670
+ patterns_for_key,
671
+ base_path=base_path,
672
+ allowed_extensions=allowed_extensions,
673
+ download_config=download_config,
674
+ )
675
+ if not isinstance(patterns_for_key, DataFilesList)
676
+ else patterns_for_key
677
+ )
678
+ return out
679
+
680
+ @classmethod
681
+ def from_hf_repo(
682
+ cls,
683
+ patterns: Dict[str, Union[List[str], DataFilesList]],
684
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
685
+ base_path: Optional[str] = None,
686
+ allowed_extensions: Optional[List[str]] = None,
687
+ download_config: Optional[DownloadConfig] = None,
688
+ ) -> "DataFilesDict":
689
+ out = cls()
690
+ for key, patterns_for_key in patterns.items():
691
+ out[key] = (
692
+ DataFilesList.from_hf_repo(
693
+ patterns_for_key,
694
+ dataset_info=dataset_info,
695
+ base_path=base_path,
696
+ allowed_extensions=allowed_extensions,
697
+ download_config=download_config,
698
+ )
699
+ if not isinstance(patterns_for_key, DataFilesList)
700
+ else patterns_for_key
701
+ )
702
+ return out
703
+
704
+ @classmethod
705
+ def from_patterns(
706
+ cls,
707
+ patterns: Dict[str, Union[List[str], DataFilesList]],
708
+ base_path: Optional[str] = None,
709
+ allowed_extensions: Optional[List[str]] = None,
710
+ download_config: Optional[DownloadConfig] = None,
711
+ ) -> "DataFilesDict":
712
+ out = cls()
713
+ for key, patterns_for_key in patterns.items():
714
+ out[key] = (
715
+ DataFilesList.from_patterns(
716
+ patterns_for_key,
717
+ base_path=base_path,
718
+ allowed_extensions=allowed_extensions,
719
+ download_config=download_config,
720
+ )
721
+ if not isinstance(patterns_for_key, DataFilesList)
722
+ else patterns_for_key
723
+ )
724
+ return out
725
+
726
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
727
+ out = type(self)()
728
+ for key, data_files_list in self.items():
729
+ out[key] = data_files_list.filter_extensions(extensions)
730
+ return out
731
+
732
+
733
+ class DataFilesPatternsList(List[str]):
734
+ """
735
+ List of data files patterns (absolute local paths or URLs).
736
+ For each pattern there should also be a list of allowed extensions
737
+ to keep, or None to keep all the files for the pattern.
738
+ """
739
+
740
+ def __init__(
741
+ self,
742
+ patterns: List[str],
743
+ allowed_extensions: List[Optional[List[str]]],
744
+ ):
745
+ super().__init__(patterns)
746
+ self.allowed_extensions = allowed_extensions
747
+
748
+ def __add__(self, other):
749
+ return DataFilesList([*self, *other], self.allowed_extensions + other.allowed_extensions)
750
+
751
+ @classmethod
752
+ def from_patterns(
753
+ cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
754
+ ) -> "DataFilesPatternsDict":
755
+ return cls(patterns, [allowed_extensions] * len(patterns))
756
+
757
+ def resolve(
758
+ self,
759
+ base_path: str,
760
+ download_config: Optional[DownloadConfig] = None,
761
+ ) -> "DataFilesList":
762
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
763
+ data_files = []
764
+ for pattern, allowed_extensions in zip(self, self.allowed_extensions):
765
+ try:
766
+ data_files.extend(
767
+ resolve_pattern(
768
+ pattern,
769
+ base_path=base_path,
770
+ allowed_extensions=allowed_extensions,
771
+ download_config=download_config,
772
+ )
773
+ )
774
+ except FileNotFoundError:
775
+ if not has_magic(pattern):
776
+ raise
777
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
778
+ return DataFilesList(data_files, origin_metadata)
779
+
780
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList":
781
+ return DataFilesPatternsList(
782
+ self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
783
+ )
784
+
785
+
786
+ class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
787
+ """
788
+ Dict of split_name -> list of data files patterns (absolute local paths or URLs).
789
+ """
790
+
791
+ @classmethod
792
+ def from_patterns(
793
+ cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
794
+ ) -> "DataFilesPatternsDict":
795
+ out = cls()
796
+ for key, patterns_for_key in patterns.items():
797
+ out[key] = (
798
+ DataFilesPatternsList.from_patterns(
799
+ patterns_for_key,
800
+ allowed_extensions=allowed_extensions,
801
+ )
802
+ if not isinstance(patterns_for_key, DataFilesPatternsList)
803
+ else patterns_for_key
804
+ )
805
+ return out
806
+
807
+ def resolve(
808
+ self,
809
+ base_path: str,
810
+ download_config: Optional[DownloadConfig] = None,
811
+ ) -> "DataFilesDict":
812
+ out = DataFilesDict()
813
+ for key, data_files_patterns_list in self.items():
814
+ out[key] = data_files_patterns_list.resolve(base_path, download_config)
815
+ return out
816
+
817
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
818
+ out = type(self)()
819
+ for key, data_files_patterns_list in self.items():
820
+ out[key] = data_files_patterns_list.filter_extensions(extensions)
821
+ return out
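The patterns classes above are internal to `datasets.data_files`, but they can be exercised directly. A minimal sketch of resolving a patterns dict into concrete files, assuming a local `data/` directory with CSV files (the directory and file names are hypothetical):

```py
# Sketch only: DataFilesPatternsDict and DataFilesDict are internal APIs of `datasets`.
from datasets.data_files import DataFilesPatternsDict

# Unresolved glob patterns per split; "data/*.csv" is an assumed local layout.
patterns = DataFilesPatternsDict.from_patterns(
    {"train": ["data/train*.csv"], "test": ["data/test*.csv"]}
)

# resolve() expands each pattern into a DataFilesList of absolute paths/URLs
# (plus origin metadata) and groups them per split in a DataFilesDict.
data_files = patterns.resolve(base_path=".")
print({split: list(files) for split, files in data_files.items()})
```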
venv/lib/python3.10/site-packages/datasets/distributed.py ADDED
@@ -0,0 +1,39 @@
1
+ from typing import TypeVar
2
+
3
+ from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
4
+ from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
5
+
6
+
7
+ DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
8
+
9
+
10
+ def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
11
+ """
12
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
13
+
14
+ For map-style datasets:
15
+
16
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
17
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
18
+
19
+ For iterable datasets:
20
+
21
+ If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
22
+ then the shards are evenly assigned across the nodes, which is the most optimized.
23
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
24
+
25
+ Args:
26
+ dataset ([`Dataset`] or [`IterableDataset`]):
27
+ The dataset to split by node.
28
+ rank (`int`):
29
+ Rank of the current node.
30
+ world_size (`int`):
31
+ Total number of nodes.
32
+
33
+ Returns:
34
+ [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
35
+ """
36
+ if isinstance(dataset, Dataset):
37
+ return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
38
+ else:
39
+ return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
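A small, self-contained illustration of the behaviour described in the docstring, for a map-style dataset (the rank and world-size values are arbitrary):

```py
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

ds = Dataset.from_dict({"x": list(range(8))})

# Each rank receives a contiguous chunk of the map-style dataset.
world_size = 4
for rank in range(world_size):
    shard = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    print(rank, shard["x"])  # e.g. rank 0 -> [0, 1], rank 1 -> [2, 3], ...
```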
venv/lib/python3.10/site-packages/datasets/features/__init__.py ADDED
@@ -0,0 +1,20 @@
1
+ # ruff: noqa
2
+
3
+ __all__ = [
4
+ "Audio",
5
+ "Array2D",
6
+ "Array3D",
7
+ "Array4D",
8
+ "Array5D",
9
+ "ClassLabel",
10
+ "Features",
11
+ "Sequence",
12
+ "Value",
13
+ "Image",
14
+ "Translation",
15
+ "TranslationVariableLanguages",
16
+ ]
17
+ from .audio import Audio
18
+ from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
19
+ from .image import Image
20
+ from .translation import Translation, TranslationVariableLanguages
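The names re-exported here are the public feature types; a short example composing a few of them into a schema:

```py
from datasets import ClassLabel, Features, Sequence, Value

features = Features(
    {
        "text": Value("string"),                    # scalar string column
        "label": ClassLabel(names=["neg", "pos"]),  # integer-backed class labels
        "token_ids": Sequence(Value("int32")),      # variable-length list of ints
    }
)
print(features)
```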
venv/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (591 Bytes). View file
 
venv/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
venv/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc ADDED
Binary file (75.8 kB). View file
 
venv/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
venv/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc ADDED
Binary file (5.19 kB). View file
 
venv/lib/python3.10/site-packages/datasets/features/audio.py ADDED
@@ -0,0 +1,277 @@
1
+ import os
2
+ from dataclasses import dataclass, field
3
+ from io import BytesIO
4
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
5
+
6
+ import numpy as np
7
+ import pyarrow as pa
8
+
9
+ from .. import config
10
+ from ..download.download_config import DownloadConfig
11
+ from ..table import array_cast
12
+ from ..utils.file_utils import xopen, xsplitext
13
+ from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
14
+
15
+
16
+ if TYPE_CHECKING:
17
+ from .features import FeatureType
18
+
19
+
20
+ @dataclass
21
+ class Audio:
22
+ """Audio [`Feature`] to extract audio data from an audio file.
23
+
24
+ Input: The Audio feature accepts as input:
25
+ - A `str`: Absolute path to the audio file (i.e. random access is allowed).
26
+ - A `dict` with the keys:
27
+
28
+ - `path`: String with relative path of the audio file to the archive file.
29
+ - `bytes`: Bytes content of the audio file.
30
+
31
+ This is useful for archived files with sequential access.
32
+
33
+ - A `dict` with the keys:
34
+
35
+ - `path`: String with relative path of the audio file to the archive file.
36
+ - `array`: Array containing the audio sample
37
+ - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
38
+
39
+ This is useful for archived files with sequential access.
40
+
41
+ Args:
42
+ sampling_rate (`int`, *optional*):
43
+ Target sampling rate. If `None`, the native sampling rate is used.
44
+ mono (`bool`, defaults to `True`):
45
+ Whether to convert the audio signal to mono by averaging samples across
46
+ channels.
47
+ decode (`bool`, defaults to `True`):
48
+ Whether to decode the audio data. If `False`,
49
+ returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
50
+
51
+ Example:
52
+
53
+ ```py
54
+ >>> from datasets import load_dataset, Audio
55
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
56
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
57
+ >>> ds[0]["audio"]
58
+ {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
59
+ 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
60
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
61
+ 'sampling_rate': 16000}
62
+ ```
63
+ """
64
+
65
+ sampling_rate: Optional[int] = None
66
+ mono: bool = True
67
+ decode: bool = True
68
+ id: Optional[str] = None
69
+ # Automatically constructed
70
+ dtype: ClassVar[str] = "dict"
71
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
72
+ _type: str = field(default="Audio", init=False, repr=False)
73
+
74
+ def __call__(self):
75
+ return self.pa_type
76
+
77
+ def encode_example(self, value: Union[str, bytes, dict]) -> dict:
78
+ """Encode example into a format for Arrow.
79
+
80
+ Args:
81
+ value (`str` or `dict`):
82
+ Data passed as input to Audio feature.
83
+
84
+ Returns:
85
+ `dict`
86
+ """
87
+ try:
88
+ import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
89
+ except ImportError as err:
90
+ raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
91
+ if isinstance(value, str):
92
+ return {"bytes": None, "path": value}
93
+ elif isinstance(value, bytes):
94
+ return {"bytes": value, "path": None}
95
+ elif "array" in value:
96
+ # convert the audio array to wav bytes
97
+ buffer = BytesIO()
98
+ sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
99
+ return {"bytes": buffer.getvalue(), "path": None}
100
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
101
+ # we set "bytes": None to not duplicate the data if they're already available locally
102
+ if value["path"].endswith("pcm"):
103
+ # "PCM" only has raw audio bytes
104
+ if value.get("sampling_rate") is None:
105
+ # To convert raw PCM bytes to WAV bytes, the sampling rate must be known
106
+ raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
107
+ if value.get("bytes"):
108
+ # If PCM bytes were already provided, use them directly instead of re-reading the file
109
+ bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
110
+ else:
111
+ bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
112
+
113
+ buffer = BytesIO(bytes())
114
+ sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
115
+ return {"bytes": buffer.getvalue(), "path": None}
116
+ else:
117
+ return {"bytes": None, "path": value.get("path")}
118
+ elif value.get("bytes") is not None or value.get("path") is not None:
119
+ # store the audio bytes, and path is used to infer the audio format using the file extension
120
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
121
+ else:
122
+ raise ValueError(
123
+ f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
124
+ )
125
+
126
+ def decode_example(
127
+ self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
128
+ ) -> dict:
129
+ """Decode example audio file into audio data.
130
+
131
+ Args:
132
+ value (`dict`):
133
+ A dictionary with keys:
134
+
135
+ - `path`: String with relative audio file path.
136
+ - `bytes`: Bytes of the audio file.
137
+ token_per_repo_id (`dict`, *optional*):
138
+ To access and decode
139
+ audio files from private repositories on the Hub, you can pass
140
+ a dictionary repo_id (`str`) -> token (`bool` or `str`)
141
+
142
+ Returns:
143
+ `dict`
144
+ """
145
+ if not self.decode:
146
+ raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
147
+
148
+ path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
149
+ if path is None and file is None:
150
+ raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
151
+
152
+ try:
153
+ import librosa
154
+ import soundfile as sf
155
+ except ImportError as err:
156
+ raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
157
+
158
+ audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
159
+ if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
160
+ raise RuntimeError(
161
+ "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
162
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
163
+ )
164
+ elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
165
+ raise RuntimeError(
166
+ "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
167
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
168
+ )
169
+
170
+ if file is None:
171
+ token_per_repo_id = token_per_repo_id or {}
172
+ source_url = path.split("::")[-1]
173
+ pattern = (
174
+ config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
175
+ )
176
+ try:
177
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
178
+ token = token_per_repo_id[repo_id]
179
+ except (ValueError, KeyError):
180
+ token = None
181
+
182
+ download_config = DownloadConfig(token=token)
183
+ with xopen(path, "rb", download_config=download_config) as f:
184
+ array, sampling_rate = sf.read(f)
185
+
186
+ else:
187
+ array, sampling_rate = sf.read(file)
188
+
189
+ array = array.T
190
+ if self.mono:
191
+ array = librosa.to_mono(array)
192
+ if self.sampling_rate and self.sampling_rate != sampling_rate:
193
+ array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
194
+ sampling_rate = self.sampling_rate
195
+
196
+ return {"path": path, "array": array, "sampling_rate": sampling_rate}
197
+
198
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
199
+ """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
200
+ from .features import Value
201
+
202
+ if self.decode:
203
+ raise ValueError("Cannot flatten a decoded Audio feature.")
204
+ return {
205
+ "bytes": Value("binary"),
206
+ "path": Value("string"),
207
+ }
208
+
209
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
210
+ """Cast an Arrow array to the Audio arrow storage type.
211
+ The Arrow types that can be converted to the Audio pyarrow storage type are:
212
+
213
+ - `pa.string()` - it must contain the "path" data
214
+ - `pa.binary()` - it must contain the audio bytes
215
+ - `pa.struct({"bytes": pa.binary()})`
216
+ - `pa.struct({"path": pa.string()})`
217
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
218
+
219
+ Args:
220
+ storage (`Union[pa.StringArray, pa.StructArray]`):
221
+ PyArrow array to cast.
222
+
223
+ Returns:
224
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
225
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
226
+ """
227
+ if pa.types.is_string(storage.type):
228
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
229
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
230
+ elif pa.types.is_binary(storage.type):
231
+ path_array = pa.array([None] * len(storage), type=pa.string())
232
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
233
+ elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
234
+ storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
235
+ elif pa.types.is_struct(storage.type):
236
+ if storage.type.get_field_index("bytes") >= 0:
237
+ bytes_array = storage.field("bytes")
238
+ else:
239
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
240
+ if storage.type.get_field_index("path") >= 0:
241
+ path_array = storage.field("path")
242
+ else:
243
+ path_array = pa.array([None] * len(storage), type=pa.string())
244
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
245
+ return array_cast(storage, self.pa_type)
246
+
247
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
248
+ """Embed audio files into the Arrow array.
249
+
250
+ Args:
251
+ storage (`pa.StructArray`):
252
+ PyArrow array to embed.
253
+
254
+ Returns:
255
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
256
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
257
+ """
258
+
259
+ @no_op_if_value_is_null
260
+ def path_to_bytes(path):
261
+ with xopen(path, "rb") as f:
262
+ bytes_ = f.read()
263
+ return bytes_
264
+
265
+ bytes_array = pa.array(
266
+ [
267
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
268
+ for x in storage.to_pylist()
269
+ ],
270
+ type=pa.binary(),
271
+ )
272
+ path_array = pa.array(
273
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
274
+ type=pa.string(),
275
+ )
276
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
277
+ return array_cast(storage, self.pa_type)
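A minimal illustration of `cast_storage`: it only reshapes Arrow data into the `{"bytes", "path"}` struct, so no audio backend is needed (the file names below are hypothetical):

```py
import pyarrow as pa
from datasets.features import Audio

# A plain string column of (hypothetical) audio file paths...
paths = pa.array(["clip_0001.wav", "clip_0002.wav", None])

# ...is cast to the Audio storage struct with null "bytes" entries.
storage = Audio().cast_storage(paths)
print(storage.type)        # struct<bytes: binary, path: string>
print(storage.to_pylist())
```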
venv/lib/python3.10/site-packages/datasets/features/features.py ADDED
@@ -0,0 +1,2202 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """This class handle features definition in datasets and some utilities to display table type."""
17
+
18
+ import copy
19
+ import json
20
+ import re
21
+ import sys
22
+ from collections.abc import Iterable, Mapping
23
+ from collections.abc import Sequence as SequenceABC
24
+ from dataclasses import InitVar, dataclass, field, fields
25
+ from functools import reduce, wraps
26
+ from operator import mul
27
+ from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
28
+ from typing import Sequence as Sequence_
29
+
30
+ import numpy as np
31
+ import pandas as pd
32
+ import pyarrow as pa
33
+ import pyarrow.compute as pc
34
+ import pyarrow.types
35
+ import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
36
+ from pandas.api.extensions import ExtensionArray as PandasExtensionArray
37
+ from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
38
+
39
+ from .. import config
40
+ from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
41
+ from ..table import array_cast
42
+ from ..utils import experimental, logging
43
+ from ..utils.py_utils import asdict, first_non_null_value, zip_dict
44
+ from .audio import Audio
45
+ from .image import Image, encode_pil_image
46
+ from .translation import Translation, TranslationVariableLanguages
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
53
+ """
54
+ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
55
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
56
+ """
57
+ if pyarrow.types.is_null(arrow_type):
58
+ return "null"
59
+ elif pyarrow.types.is_boolean(arrow_type):
60
+ return "bool"
61
+ elif pyarrow.types.is_int8(arrow_type):
62
+ return "int8"
63
+ elif pyarrow.types.is_int16(arrow_type):
64
+ return "int16"
65
+ elif pyarrow.types.is_int32(arrow_type):
66
+ return "int32"
67
+ elif pyarrow.types.is_int64(arrow_type):
68
+ return "int64"
69
+ elif pyarrow.types.is_uint8(arrow_type):
70
+ return "uint8"
71
+ elif pyarrow.types.is_uint16(arrow_type):
72
+ return "uint16"
73
+ elif pyarrow.types.is_uint32(arrow_type):
74
+ return "uint32"
75
+ elif pyarrow.types.is_uint64(arrow_type):
76
+ return "uint64"
77
+ elif pyarrow.types.is_float16(arrow_type):
78
+ return "float16" # pyarrow dtype is "halffloat"
79
+ elif pyarrow.types.is_float32(arrow_type):
80
+ return "float32" # pyarrow dtype is "float"
81
+ elif pyarrow.types.is_float64(arrow_type):
82
+ return "float64" # pyarrow dtype is "double"
83
+ elif pyarrow.types.is_time32(arrow_type):
84
+ return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
85
+ elif pyarrow.types.is_time64(arrow_type):
86
+ return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
87
+ elif pyarrow.types.is_timestamp(arrow_type):
88
+ if arrow_type.tz is None:
89
+ return f"timestamp[{arrow_type.unit}]"
90
+ elif arrow_type.tz:
91
+ return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
92
+ else:
93
+ raise ValueError(f"Unexpected timestamp object {arrow_type}.")
94
+ elif pyarrow.types.is_date32(arrow_type):
95
+ return "date32" # pyarrow dtype is "date32[day]"
96
+ elif pyarrow.types.is_date64(arrow_type):
97
+ return "date64" # pyarrow dtype is "date64[ms]"
98
+ elif pyarrow.types.is_duration(arrow_type):
99
+ return f"duration[{arrow_type.unit}]"
100
+ elif pyarrow.types.is_decimal128(arrow_type):
101
+ return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
102
+ elif pyarrow.types.is_decimal256(arrow_type):
103
+ return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
104
+ elif pyarrow.types.is_binary(arrow_type):
105
+ return "binary"
106
+ elif pyarrow.types.is_large_binary(arrow_type):
107
+ return "large_binary"
108
+ elif pyarrow.types.is_string(arrow_type):
109
+ return "string"
110
+ elif pyarrow.types.is_large_string(arrow_type):
111
+ return "large_string"
112
+ else:
113
+ raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
114
+
115
+
116
+ def string_to_arrow(datasets_dtype: str) -> pa.DataType:
117
+ """
118
+ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
119
+
120
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
121
+
122
+ This is necessary because the datasets.Value() primitive type is constructed using a string dtype
123
+
124
+ Value(dtype=str)
125
+
126
+ But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema,
127
+ which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
128
+ purpose of this function.
129
+ """
130
+
131
+ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
132
+ msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
133
+ if examples:
134
+ examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
135
+ msg += f"\nValid examples include: {examples}."
136
+ if urls:
137
+ urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
138
+ msg += f"\nFor more insformation, see: {urls}."
139
+ return msg
140
+
141
+ if datasets_dtype in pa.__dict__:
142
+ return pa.__dict__[datasets_dtype]()
143
+
144
+ if (datasets_dtype + "_") in pa.__dict__:
145
+ return pa.__dict__[datasets_dtype + "_"]()
146
+
147
+ timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
148
+ if timestamp_matches:
149
+ timestamp_internals = timestamp_matches.group(1)
150
+ internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
151
+ if timestamp_internals in ["s", "ms", "us", "ns"]:
152
+ return pa.timestamp(timestamp_internals)
153
+ elif internals_matches:
154
+ return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
155
+ else:
156
+ raise ValueError(
157
+ _dtype_error_msg(
158
+ datasets_dtype,
159
+ "timestamp",
160
+ examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
161
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
162
+ )
163
+ )
164
+
165
+ duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
166
+ if duration_matches:
167
+ duration_internals = duration_matches.group(1)
168
+ if duration_internals in ["s", "ms", "us", "ns"]:
169
+ return pa.duration(duration_internals)
170
+ else:
171
+ raise ValueError(
172
+ _dtype_error_msg(
173
+ datasets_dtype,
174
+ "duration",
175
+ examples=["duration[s]", "duration[us]"],
176
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
177
+ )
178
+ )
179
+
180
+ time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
181
+ if time_matches:
182
+ time_internals_bits = time_matches.group(1)
183
+ if time_internals_bits == "32":
184
+ time_internals_unit = time_matches.group(2)
185
+ if time_internals_unit in ["s", "ms"]:
186
+ return pa.time32(time_internals_unit)
187
+ else:
188
+ raise ValueError(
189
+ f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
190
+ )
191
+ elif time_internals_bits == "64":
192
+ time_internals_unit = time_matches.group(2)
193
+ if time_internals_unit in ["us", "ns"]:
194
+ return pa.time64(time_internals_unit)
195
+ else:
196
+ raise ValueError(
197
+ f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
198
+ )
199
+ else:
200
+ raise ValueError(
201
+ _dtype_error_msg(
202
+ datasets_dtype,
203
+ "time",
204
+ examples=["time32[s]", "time64[us]"],
205
+ urls=[
206
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
207
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
208
+ ],
209
+ )
210
+ )
211
+
212
+ decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
213
+ if decimal_matches:
214
+ decimal_internals_bits = decimal_matches.group(1)
215
+ if decimal_internals_bits == "128":
216
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
217
+ if decimal_internals_precision_and_scale:
218
+ precision = decimal_internals_precision_and_scale.group(1)
219
+ scale = decimal_internals_precision_and_scale.group(2)
220
+ return pa.decimal128(int(precision), int(scale))
221
+ else:
222
+ raise ValueError(
223
+ _dtype_error_msg(
224
+ datasets_dtype,
225
+ "decimal128",
226
+ examples=["decimal128(10, 2)", "decimal128(4, -2)"],
227
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
228
+ )
229
+ )
230
+ elif decimal_internals_bits == "256":
231
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
232
+ if decimal_internals_precision_and_scale:
233
+ precision = decimal_internals_precision_and_scale.group(1)
234
+ scale = decimal_internals_precision_and_scale.group(2)
235
+ return pa.decimal256(int(precision), int(scale))
236
+ else:
237
+ raise ValueError(
238
+ _dtype_error_msg(
239
+ datasets_dtype,
240
+ "decimal256",
241
+ examples=["decimal256(30, 2)", "decimal256(38, -4)"],
242
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
243
+ )
244
+ )
245
+ else:
246
+ raise ValueError(
247
+ _dtype_error_msg(
248
+ datasets_dtype,
249
+ "decimal",
250
+ examples=["decimal128(12, 3)", "decimal256(40, 6)"],
251
+ urls=[
252
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
253
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
254
+ ],
255
+ )
256
+ )
257
+
258
+ raise ValueError(
259
+ f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
260
+ f"Please make sure to use a correct data type, see: "
261
+ f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
262
+ )
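A quick check of the round-trip property mentioned in the docstrings above (both helpers are module-internal, so the import path is an implementation detail):

```py
from datasets.features.features import _arrow_to_datasets_dtype, string_to_arrow

for dtype in ["int32", "float64", "timestamp[s, tz=UTC]", "decimal128(10, 2)"]:
    pa_type = string_to_arrow(dtype)
    assert _arrow_to_datasets_dtype(pa_type) == dtype  # round trip holds for these dtypes
    print(dtype, "->", pa_type)
```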
263
+
264
+
265
+ def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
266
+ """
267
+ Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
268
+ It works recursively.
269
+
270
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be cast.
271
+ If the first element needs to be cast, then all the elements of the list will be cast; otherwise they stay the same.
272
+ This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
273
+
274
+ Args:
275
+ obj: the object (nested struct) to cast.
276
+ only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
277
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
278
+ Indeed Arrow only supports converting 1-dimensional array values.
279
+ optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
280
+ and if it doesn't, not checking the rest of the list elements.
281
+
282
+ Returns:
283
+ casted_obj: the casted object
284
+ has_changed (bool): True if the object has been changed, False if it is identical
285
+ """
286
+
287
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
288
+ import tensorflow as tf
289
+
290
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
291
+ import torch
292
+
293
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
294
+ import jax.numpy as jnp
295
+
296
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
297
+ import PIL.Image
298
+
299
+ if isinstance(obj, np.ndarray):
300
+ if obj.ndim == 0:
301
+ return obj[()], True
302
+ elif not only_1d_for_numpy or obj.ndim == 1:
303
+ return obj, False
304
+ else:
305
+ return (
306
+ [
307
+ _cast_to_python_objects(
308
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
309
+ )[0]
310
+ for x in obj
311
+ ],
312
+ True,
313
+ )
314
+ elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
315
+ if obj.ndim == 0:
316
+ return obj.detach().cpu().numpy()[()], True
317
+ elif not only_1d_for_numpy or obj.ndim == 1:
318
+ return obj.detach().cpu().numpy(), True
319
+ else:
320
+ return (
321
+ [
322
+ _cast_to_python_objects(
323
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
324
+ )[0]
325
+ for x in obj.detach().cpu().numpy()
326
+ ],
327
+ True,
328
+ )
329
+ elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
330
+ if obj.ndim == 0:
331
+ return obj.numpy()[()], True
332
+ elif not only_1d_for_numpy or obj.ndim == 1:
333
+ return obj.numpy(), True
334
+ else:
335
+ return (
336
+ [
337
+ _cast_to_python_objects(
338
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
339
+ )[0]
340
+ for x in obj.numpy()
341
+ ],
342
+ True,
343
+ )
344
+ elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
345
+ if obj.ndim == 0:
346
+ return np.asarray(obj)[()], True
347
+ elif not only_1d_for_numpy or obj.ndim == 1:
348
+ return np.asarray(obj), True
349
+ else:
350
+ return (
351
+ [
352
+ _cast_to_python_objects(
353
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
354
+ )[0]
355
+ for x in np.asarray(obj)
356
+ ],
357
+ True,
358
+ )
359
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
360
+ return encode_pil_image(obj), True
361
+ elif isinstance(obj, pd.Series):
362
+ return (
363
+ _cast_to_python_objects(
364
+ obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
365
+ )[0],
366
+ True,
367
+ )
368
+ elif isinstance(obj, pd.DataFrame):
369
+ return (
370
+ {
371
+ key: _cast_to_python_objects(
372
+ value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
373
+ )[0]
374
+ for key, value in obj.to_dict("series").items()
375
+ },
376
+ True,
377
+ )
378
+ elif isinstance(obj, pd.Timestamp):
379
+ return obj.to_pydatetime(), True
380
+ elif isinstance(obj, pd.Timedelta):
381
+ return obj.to_pytimedelta(), True
382
+ elif isinstance(obj, Mapping):
383
+ has_changed = not isinstance(obj, dict)
384
+ output = {}
385
+ for k, v in obj.items():
386
+ casted_v, has_changed_v = _cast_to_python_objects(
387
+ v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
388
+ )
389
+ has_changed |= has_changed_v
390
+ output[k] = casted_v
391
+ return output if has_changed else obj, has_changed
392
+ elif hasattr(obj, "__array__"):
393
+ return (
394
+ _cast_to_python_objects(
395
+ obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
396
+ )[0],
397
+ True,
398
+ )
399
+ elif isinstance(obj, (list, tuple)):
400
+ if len(obj) > 0:
401
+ for first_elmt in obj:
402
+ if _check_non_null_non_empty_recursive(first_elmt):
403
+ break
404
+ casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
405
+ first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
406
+ )
407
+ if has_changed_first_elmt or not optimize_list_casting:
408
+ return (
409
+ [
410
+ _cast_to_python_objects(
411
+ elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
412
+ )[0]
413
+ for elmt in obj
414
+ ],
415
+ True,
416
+ )
417
+ else:
418
+ if isinstance(obj, (list, tuple)):
419
+ return obj, False
420
+ else:
421
+ return list(obj), True
422
+ else:
423
+ return obj, False
424
+ else:
425
+ return obj, False
426
+
427
+
428
+ def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
429
+ """
430
+ Cast numpy/pytorch/tensorflow/pandas objects to python lists.
431
+ It works recursively.
432
+
433
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be cast.
434
+ If the first element needs to be cast, then all the elements of the list will be cast; otherwise they stay the same.
435
+ This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
436
+
437
+ Args:
438
+ obj: the object (nested struct) to cast
439
+ only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
440
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
441
+ Indeed Arrow only supports converting 1-dimensional array values.
442
+ optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
443
+ and if it doesn't, not checking the rest of the list elements.
444
+
445
+ Returns:
446
+ casted_obj: the casted object
447
+ """
448
+ return _cast_to_python_objects(
449
+ obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
450
+ )[0]
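A short illustration of the effect of `only_1d_for_numpy` on a nested container holding numpy data:

```py
import numpy as np
from datasets.features.features import cast_to_python_objects

batch = {"scores": np.arange(6, dtype=np.float32).reshape(2, 3)}

# By default, multi-dimensional numpy arrays are passed through unchanged.
print(type(cast_to_python_objects(batch)["scores"]))  # <class 'numpy.ndarray'>

# With only_1d_for_numpy=True they are split into lists of 1-d arrays,
# the shape Arrow can ingest.
casted = cast_to_python_objects(batch, only_1d_for_numpy=True)
print(type(casted["scores"]), casted["scores"][0])
```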
451
+
452
+
453
+ @dataclass
454
+ class Value:
455
+ """
456
+ The `Value` dtypes are as follows:
457
+
458
+ - `null`
459
+ - `bool`
460
+ - `int8`
461
+ - `int16`
462
+ - `int32`
463
+ - `int64`
464
+ - `uint8`
465
+ - `uint16`
466
+ - `uint32`
467
+ - `uint64`
468
+ - `float16`
469
+ - `float32` (alias float)
470
+ - `float64` (alias double)
471
+ - `time32[(s|ms)]`
472
+ - `time64[(us|ns)]`
473
+ - `timestamp[(s|ms|us|ns)]`
474
+ - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
475
+ - `date32`
476
+ - `date64`
477
+ - `duration[(s|ms|us|ns)]`
478
+ - `decimal128(precision, scale)`
479
+ - `decimal256(precision, scale)`
480
+ - `binary`
481
+ - `large_binary`
482
+ - `string`
483
+ - `large_string`
484
+
485
+ Example:
486
+
487
+ ```py
488
+ >>> from datasets import Features
489
+ >>> features = Features({'stars': Value(dtype='int32')})
490
+ >>> features
491
+ {'stars': Value(dtype='int32', id=None)}
492
+ ```
493
+ """
494
+
495
+ dtype: str
496
+ id: Optional[str] = None
497
+ # Automatically constructed
498
+ pa_type: ClassVar[Any] = None
499
+ _type: str = field(default="Value", init=False, repr=False)
500
+
501
+ def __post_init__(self):
502
+ if self.dtype == "double": # fix inferred type
503
+ self.dtype = "float64"
504
+ if self.dtype == "float": # fix inferred type
505
+ self.dtype = "float32"
506
+ self.pa_type = string_to_arrow(self.dtype)
507
+
508
+ def __call__(self):
509
+ return self.pa_type
510
+
511
+ def encode_example(self, value):
512
+ if pa.types.is_boolean(self.pa_type):
513
+ return bool(value)
514
+ elif pa.types.is_integer(self.pa_type):
515
+ return int(value)
516
+ elif pa.types.is_floating(self.pa_type):
517
+ return float(value)
518
+ elif pa.types.is_string(self.pa_type):
519
+ return str(value)
520
+ else:
521
+ return value
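`encode_example` coerces raw Python values to the declared dtype before they reach Arrow; a tiny illustration:

```py
from datasets import Value

print(Value("int32").encode_example("4"))   # 4 (string coerced to int)
print(Value("string").encode_example(3.5))  # '3.5'
print(Value("bool").encode_example(1))      # True
```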
522
+
523
+
524
+ class _ArrayXD:
525
+ def __post_init__(self):
526
+ self.shape = tuple(self.shape)
527
+
528
+ def __call__(self):
529
+ pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
530
+ return pa_type
531
+
532
+ def encode_example(self, value):
533
+ return value
534
+
535
+
536
+ @dataclass
537
+ class Array2D(_ArrayXD):
538
+ """Create a two-dimensional array.
539
+
540
+ Args:
541
+ shape (`tuple`):
542
+ The size of each dimension.
543
+ dtype (`str`):
544
+ The value of the data type.
545
+
546
+ Example:
547
+
548
+ ```py
549
+ >>> from datasets import Features
550
+ >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
551
+ ```
552
+ """
553
+
554
+ shape: tuple
555
+ dtype: str
556
+ id: Optional[str] = None
557
+ # Automatically constructed
558
+ _type: str = field(default="Array2D", init=False, repr=False)
559
+
560
+
561
+ @dataclass
562
+ class Array3D(_ArrayXD):
563
+ """Create a three-dimensional array.
564
+
565
+ Args:
566
+ shape (`tuple`):
567
+ The size of each dimension.
568
+ dtype (`str`):
569
+ The value of the data type.
570
+
571
+ Example:
572
+
573
+ ```py
574
+ >>> from datasets import Features
575
+ >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
576
+ ```
577
+ """
578
+
579
+ shape: tuple
580
+ dtype: str
581
+ id: Optional[str] = None
582
+ # Automatically constructed
583
+ _type: str = field(default="Array3D", init=False, repr=False)
584
+
585
+
586
+ @dataclass
587
+ class Array4D(_ArrayXD):
588
+ """Create a four-dimensional array.
589
+
590
+ Args:
591
+ shape (`tuple`):
592
+ The size of each dimension.
593
+ dtype (`str`):
594
+ The value of the data type.
595
+
596
+ Example:
597
+
598
+ ```py
599
+ >>> from datasets import Features
600
+ >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
601
+ ```
602
+ """
603
+
604
+ shape: tuple
605
+ dtype: str
606
+ id: Optional[str] = None
607
+ # Automatically constructed
608
+ _type: str = field(default="Array4D", init=False, repr=False)
609
+
610
+
611
+ @dataclass
612
+ class Array5D(_ArrayXD):
613
+ """Create a five-dimensional array.
614
+
615
+ Args:
616
+ shape (`tuple`):
617
+ The size of each dimension.
618
+ dtype (`str`):
619
+ The value of the data type.
620
+
621
+ Example:
622
+
623
+ ```py
624
+ >>> from datasets import Features
625
+ >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
626
+ ```
627
+ """
628
+
629
+ shape: tuple
630
+ dtype: str
631
+ id: Optional[str] = None
632
+ # Automatically constructed
633
+ _type: str = field(default="Array5D", init=False, repr=False)
634
+
635
+
636
+ class _ArrayXDExtensionType(pa.ExtensionType):
637
+ ndims: Optional[int] = None
638
+
639
+ def __init__(self, shape: tuple, dtype: str):
640
+ if self.ndims is None or self.ndims <= 1:
641
+ raise ValueError("You must instantiate an array type with a value for dim that is > 1")
642
+ if len(shape) != self.ndims:
643
+ raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
644
+ for dim in range(1, self.ndims):
645
+ if shape[dim] is None:
646
+ raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
647
+ self.shape = tuple(shape)
648
+ self.value_type = dtype
649
+ self.storage_dtype = self._generate_dtype(self.value_type)
650
+ pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
651
+
652
+ def __arrow_ext_serialize__(self):
653
+ return json.dumps((self.shape, self.value_type)).encode()
654
+
655
+ @classmethod
656
+ def __arrow_ext_deserialize__(cls, storage_type, serialized):
657
+ args = json.loads(serialized)
658
+ return cls(*args)
659
+
660
+ # This was added to pa.ExtensionType in pyarrow >= 13.0.0
661
+ def __reduce__(self):
662
+ return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
663
+
664
+ def __hash__(self):
665
+ return hash((self.__class__, self.shape, self.value_type))
666
+
667
+ def __arrow_ext_class__(self):
668
+ return ArrayExtensionArray
669
+
670
+ def _generate_dtype(self, dtype):
671
+ dtype = string_to_arrow(dtype)
672
+ for d in reversed(self.shape):
673
+ dtype = pa.list_(dtype)
674
+ # Don't specify the size of the list, since fixed length list arrays have issues
675
+ # being validated after slicing in pyarrow 0.17.1
676
+ return dtype
677
+
678
+ def to_pandas_dtype(self):
679
+ return PandasArrayExtensionDtype(self.value_type)
680
+
681
+
682
+ class Array2DExtensionType(_ArrayXDExtensionType):
683
+ ndims = 2
684
+
685
+
686
+ class Array3DExtensionType(_ArrayXDExtensionType):
687
+ ndims = 3
688
+
689
+
690
+ class Array4DExtensionType(_ArrayXDExtensionType):
691
+ ndims = 4
692
+
693
+
694
+ class Array5DExtensionType(_ArrayXDExtensionType):
695
+ ndims = 5
696
+
697
+
698
+ # Register the extension types for deserialization
699
+ pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
700
+ pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
701
+ pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
702
+ pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
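A sketch of how these extension types wrap a nested-list storage array (internal API; the values are arbitrary):

```py
import pyarrow as pa
from datasets.features.features import Array2DExtensionType

ext_type = Array2DExtensionType(shape=(1, 3), dtype="int64")

# The storage is a plain nested list array matching the declared shape...
storage = pa.array([[[1, 2, 3]]], type=ext_type.storage_dtype)

# ...wrapped into an ArrayExtensionArray that knows how to convert back to numpy/python.
arr = pa.ExtensionArray.from_storage(ext_type, storage)
print(arr.type)
print(arr.to_pylist())  # [[[1, 2, 3]]]
```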
703
+
704
+
705
+ def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
706
+ """
707
+ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
708
+ This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
709
+
710
+ # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
711
+ # primitive types are types for which the physical representation in arrow and in numpy
712
+ # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
713
+ # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
714
+ # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
715
+ """
716
+
717
+ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
718
+ if pa.types.is_list(pa_type):
719
+ return _unnest_pa_type(pa_type.value_type)
720
+ return pa_type
721
+
722
+ if unnest:
723
+ pa_type = _unnest_pa_type(pa_type)
724
+ return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
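The helper above decides the `zero_copy_only` flag passed to `.to_numpy()`; a quick illustration (internal API):

```py
import pyarrow as pa
from datasets.features.features import _is_zero_copy_only

print(_is_zero_copy_only(pa.int64()))   # True: primitive type with the same layout as numpy
print(_is_zero_copy_only(pa.bool_()))   # False: Arrow booleans are bit-packed
print(_is_zero_copy_only(pa.list_(pa.float32()), unnest=True))  # True: checks the innermost value type
```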
725
+
726
+
727
+ class ArrayExtensionArray(pa.ExtensionArray):
728
+ def __array__(self):
729
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
730
+ return self.to_numpy(zero_copy_only=zero_copy_only)
731
+
732
+ def __getitem__(self, i):
733
+ return self.storage[i]
734
+
735
+ def to_numpy(self, zero_copy_only=True):
736
+ storage: pa.ListArray = self.storage
737
+ null_mask = storage.is_null().to_numpy(zero_copy_only=False)
738
+
739
+ if self.type.shape[0] is not None:
740
+ size = 1
741
+ null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
742
+
743
+ for i in range(self.type.ndims):
744
+ size *= self.type.shape[i]
745
+ storage = storage.flatten()
746
+ numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
747
+ numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
748
+
749
+ if len(null_indices):
750
+ numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
751
+
752
+ else:
753
+ shape = self.type.shape
754
+ ndims = self.type.ndims
755
+ arrays = []
756
+ first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
757
+ for i, is_null in enumerate(null_mask):
758
+ if is_null:
759
+ arrays.append(np.nan)
760
+ else:
761
+ storage_el = storage[i : i + 1]
762
+ first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
763
+ # flatten storage
764
+ for _ in range(ndims):
765
+ storage_el = storage_el.flatten()
766
+
767
+ numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
768
+ arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
769
+
770
+ if len(np.unique(np.diff(first_dim_offsets))) > 1:
771
+ # ragged
772
+ numpy_arr = np.empty(len(arrays), dtype=object)
773
+ numpy_arr[:] = arrays
774
+ else:
775
+ numpy_arr = np.array(arrays)
776
+
777
+ return numpy_arr
778
+
779
+ def to_pylist(self):
780
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
781
+ numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
782
+ if self.type.shape[0] is None and numpy_arr.dtype == object:
783
+ return [arr.tolist() for arr in numpy_arr.tolist()]
784
+ else:
785
+ return numpy_arr.tolist()
786
+
787
+
788
+ class PandasArrayExtensionDtype(PandasExtensionDtype):
789
+ _metadata = "value_type"
790
+
791
+ def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
792
+ self._value_type = value_type
793
+
794
+ def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
795
+ if isinstance(array, pa.ChunkedArray):
796
+ array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
797
+ zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
798
+ numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
799
+ return PandasArrayExtensionArray(numpy_arr)
800
+
801
+ @classmethod
802
+ def construct_array_type(cls):
803
+ return PandasArrayExtensionArray
804
+
805
+ @property
806
+ def type(self) -> type:
807
+ return np.ndarray
808
+
809
+ @property
810
+ def kind(self) -> str:
811
+ return "O"
812
+
813
+ @property
814
+ def name(self) -> str:
815
+ return f"array[{self.value_type}]"
816
+
817
+ @property
818
+ def value_type(self) -> np.dtype:
819
+ return self._value_type
820
+
821
+
822
+ class PandasArrayExtensionArray(PandasExtensionArray):
823
+ def __init__(self, data: np.ndarray, copy: bool = False):
824
+ self._data = data if not copy else np.array(data)
825
+ self._dtype = PandasArrayExtensionDtype(data.dtype)
826
+
827
+ def __array__(self, dtype=None):
828
+ """
829
+ Convert to NumPy Array.
830
+ Note that Pandas expects a 1D array when dtype is set to object.
831
+ But for other dtypes, the returned shape is the same as the one of ``data``.
832
+
833
+ More info about pandas 1D requirement for PandasExtensionArray here:
834
+ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
835
+
836
+ """
837
+ if dtype == object:
838
+ out = np.empty(len(self._data), dtype=object)
839
+ for i in range(len(self._data)):
840
+ out[i] = self._data[i]
841
+ return out
842
+ if dtype is None:
843
+ return self._data
844
+ else:
845
+ return self._data.astype(dtype)
846
+
847
+ def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
848
+ return PandasArrayExtensionArray(self._data, copy=True)
849
+
850
+ @classmethod
851
+ def _from_sequence(
852
+ cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
853
+ ) -> "PandasArrayExtensionArray":
854
+ if len(scalars) > 1 and all(
855
+ isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
856
+ ):
857
+ data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
858
+ else:
859
+ data = np.empty(len(scalars), dtype=object)
860
+ data[:] = scalars
861
+ return cls(data, copy=copy)
862
+
863
+ @classmethod
864
+ def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
865
+ if len(to_concat) > 1 and all(
866
+ va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
867
+ for va in to_concat
868
+ ):
869
+ data = np.vstack([va._data for va in to_concat])
870
+ else:
871
+ data = np.empty(len(to_concat), dtype=object)
872
+ data[:] = [va._data for va in to_concat]
873
+ return cls(data, copy=False)
874
+
875
+ @property
876
+ def dtype(self) -> PandasArrayExtensionDtype:
877
+ return self._dtype
878
+
879
+ @property
880
+ def nbytes(self) -> int:
881
+ return self._data.nbytes
882
+
883
+ def isna(self) -> np.ndarray:
884
+ return np.array([pd.isna(arr).any() for arr in self._data])
885
+
886
+ def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
887
+ raise NotImplementedError()
888
+
889
+ def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
890
+ if isinstance(item, int):
891
+ return self._data[item]
892
+ return PandasArrayExtensionArray(self._data[item], copy=False)
893
+
894
+ def take(
895
+ self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
896
+ ) -> "PandasArrayExtensionArray":
897
+ indices: np.ndarray = np.asarray(indices, dtype=int)
898
+ if allow_fill:
899
+ fill_value = (
900
+ self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
901
+ )
902
+ mask = indices == -1
903
+ if (indices < -1).any():
904
+ raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
905
+ elif len(self) > 0:
906
+ pass
907
+ elif not np.all(mask):
908
+ raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
909
+ else:
910
+ data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
911
+ return PandasArrayExtensionArray(data, copy=False)
912
+ took = self._data.take(indices, axis=0)
913
+ if allow_fill and mask.any():
914
+ took[mask] = [fill_value] * np.sum(mask)
915
+ return PandasArrayExtensionArray(took, copy=False)
916
+
917
+ def __len__(self) -> int:
918
+ return len(self._data)
919
+
920
+ def __eq__(self, other) -> np.ndarray:
921
+ if not isinstance(other, PandasArrayExtensionArray):
922
+ raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
923
+ return (self._data == other._data).all()
924
+
925
+
926
+ def pandas_types_mapper(dtype):
927
+ if isinstance(dtype, _ArrayXDExtensionType):
928
+ return PandasArrayExtensionDtype(dtype.value_type)
929
+
930
+
931
+ @dataclass
932
+ class ClassLabel:
933
+ """Feature type for integer class labels.
934
+
935
+ There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
936
+
937
+ * `num_classes`: Create 0 to (num_classes-1) labels.
938
+ * `names`: List of label strings.
939
+ * `names_file`: File containing the list of labels.
940
+
941
+ Under the hood the labels are stored as integers.
942
+ You can use negative integers to represent unknown/missing labels.
943
+
944
+ Args:
945
+ num_classes (`int`, *optional*):
946
+ Number of classes. All labels must be < `num_classes`.
947
+ names (`list` of `str`, *optional*):
948
+ String names for the integer classes.
949
+ The order in which the names are provided is kept.
950
+ names_file (`str`, *optional*):
951
+ Path to a file with names for the integer classes, one per line.
952
+
953
+ Example:
954
+
955
+ ```py
956
+ >>> from datasets import Features
957
+ >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
958
+ >>> features
959
+ {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
960
+ ```
961
+ """
962
+
963
+ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
964
+ names: List[str] = None
965
+ names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
966
+ id: Optional[str] = None
967
+ # Automatically constructed
968
+ dtype: ClassVar[str] = "int64"
969
+ pa_type: ClassVar[Any] = pa.int64()
970
+ _str2int: ClassVar[Dict[str, int]] = None
971
+ _int2str: ClassVar[Dict[int, str]] = None
972
+ _type: str = field(default="ClassLabel", init=False, repr=False)
973
+
974
+ def __post_init__(self, num_classes, names_file):
975
+ self.num_classes = num_classes
976
+ self.names_file = names_file
977
+ if self.names_file is not None and self.names is not None:
978
+ raise ValueError("Please provide either names or names_file but not both.")
979
+ # Set self.names
980
+ if self.names is None:
981
+ if self.names_file is not None:
982
+ self.names = self._load_names_from_file(self.names_file)
983
+ elif self.num_classes is not None:
984
+ self.names = [str(i) for i in range(self.num_classes)]
985
+ else:
986
+ raise ValueError("Please provide either num_classes, names or names_file.")
987
+ elif not isinstance(self.names, SequenceABC):
988
+ raise TypeError(f"Please provide names as a list, is {type(self.names)}")
989
+ # Set self.num_classes
990
+ if self.num_classes is None:
991
+ self.num_classes = len(self.names)
992
+ elif self.num_classes != len(self.names):
993
+ raise ValueError(
994
+ "ClassLabel number of names do not match the defined num_classes. "
995
+ f"Got {len(self.names)} names VS {self.num_classes} num_classes"
996
+ )
997
+ # Prepare mappings
998
+ self._int2str = [str(name) for name in self.names]
999
+ self._str2int = {name: i for i, name in enumerate(self._int2str)}
1000
+ if len(self._int2str) != len(self._str2int):
1001
+ raise ValueError("Some label names are duplicated. Each label name should be unique.")
1002
+
1003
+ def __call__(self):
1004
+ return self.pa_type
1005
+
1006
+ def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
1007
+ """Conversion class name `string` => `integer`.
1008
+
1009
+ Example:
1010
+
1011
+ ```py
1012
+ >>> from datasets import load_dataset
1013
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
1014
+ >>> ds.features["label"].str2int('neg')
1015
+ 0
1016
+ ```
1017
+ """
1018
+ if not isinstance(values, str) and not isinstance(values, Iterable):
1019
+ raise ValueError(
1020
+ f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
1021
+ )
1022
+ return_list = True
1023
+ if isinstance(values, str):
1024
+ values = [values]
1025
+ return_list = False
1026
+
1027
+ output = [self._strval2int(value) for value in values]
1028
+ return output if return_list else output[0]
1029
+
1030
+ def _strval2int(self, value: str) -> int:
1031
+ failed_parse = False
1032
+ value = str(value)
1033
+ # first attempt - raw string value
1034
+ int_value = self._str2int.get(value)
1035
+ if int_value is None:
1036
+ # second attempt - strip whitespace
1037
+ int_value = self._str2int.get(value.strip())
1038
+ if int_value is None:
1039
+ # third attempt - convert str to int
1040
+ try:
1041
+ int_value = int(value)
1042
+ except ValueError:
1043
+ failed_parse = True
1044
+ else:
1045
+ if int_value < -1 or int_value >= self.num_classes:
1046
+ failed_parse = True
1047
+ if failed_parse:
1048
+ raise ValueError(f"Invalid string class label {value}")
1049
+ return int_value
1050
+
1051
+ def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
1052
+ """Conversion `integer` => class name `string`.
1053
+
1054
+ Regarding unknown/missing labels: passing negative integers raises `ValueError`.
1055
+
1056
+ Example:
1057
+
1058
+ ```py
1059
+ >>> from datasets import load_dataset
1060
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
1061
+ >>> ds.features["label"].int2str(0)
1062
+ 'neg'
1063
+ ```
1064
+ """
1065
+ if not isinstance(values, int) and not isinstance(values, Iterable):
1066
+ raise ValueError(
1067
+ f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
1068
+ )
1069
+ return_list = True
1070
+ if isinstance(values, int):
1071
+ values = [values]
1072
+ return_list = False
1073
+
1074
+ for v in values:
1075
+ if not 0 <= v < self.num_classes:
1076
+ raise ValueError(f"Invalid integer class label {v:d}")
1077
+
1078
+ output = [self._int2str[int(v)] for v in values]
1079
+ return output if return_list else output[0]
1080
+
1081
+ def encode_example(self, example_data):
1082
+ if self.num_classes is None:
1083
+ raise ValueError(
1084
+ "Trying to use ClassLabel feature with undefined number of class. "
1085
+ "Please set ClassLabel.names or num_classes."
1086
+ )
1087
+
1088
+ # If a string is given, convert to associated integer
1089
+ if isinstance(example_data, str):
1090
+ example_data = self.str2int(example_data)
1091
+
1092
+ # Allowing -1 to mean no label.
1093
+ if not -1 <= example_data < self.num_classes:
1094
+ raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
1095
+ return example_data
1096
+
1097
+ def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
1098
+ """Cast an Arrow array to the `ClassLabel` arrow storage type.
1099
+ The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
1100
+
1101
+ - `pa.string()`
1102
+ - any pyarrow integer type (e.g. `pa.int64()`)
1103
+
1104
+ Args:
1105
+ storage (`Union[pa.StringArray, pa.IntegerArray]`):
1106
+ PyArrow array to cast.
1107
+
1108
+ Returns:
1109
+ `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
1110
+ """
1111
+ if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
1112
+ min_max = pc.min_max(storage).as_py()
1113
+ if min_max["max"] is not None and min_max["max"] >= self.num_classes:
1114
+ raise ValueError(
1115
+ f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
1116
+ )
1117
+ elif isinstance(storage, pa.StringArray):
1118
+ storage = pa.array(
1119
+ [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
1120
+ )
1121
+ return array_cast(storage, self.pa_type)
1122
+
1123
+ @staticmethod
1124
+ def _load_names_from_file(names_filepath):
1125
+ with open(names_filepath, encoding="utf-8") as f:
1126
+ return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
1127
+
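# --- Illustrative usage sketch (editorial addition, not part of the upstream source) ---
# How the ClassLabel mappings defined above behave, using only the API shown in this file:
#
#   labels = ClassLabel(names=["neg", "pos"])
#   labels.num_classes            # -> 2
#   labels.str2int("pos")         # -> 1   (class name -> integer id)
#   labels.int2str(1)             # -> "pos"
#   labels.encode_example("neg")  # -> 0   (strings are encoded to their integer id)
#   labels.encode_example(-1)     # -> -1  (negative ids are allowed to mean "no label")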
1128
+
1129
+ @dataclass
1130
+ class Sequence:
1131
+ """Construct a list of feature from a single type or a dict of types.
1132
+ Mostly here for compatibility with tfds.
1133
+
1134
+ Args:
1135
+ feature:
1136
+ A list of features of a single type or a dictionary of types.
1137
+ length (`int`):
1138
+ Length of the sequence.
1139
+
1140
+ Example:
1141
+
1142
+ ```py
1143
+ >>> from datasets import Features, Sequence, Value, ClassLabel
1144
+ >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
1145
+ >>> features
1146
+ {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
1147
+ ```
1148
+ """
1149
+
1150
+ feature: Any
1151
+ length: int = -1
1152
+ id: Optional[str] = None
1153
+ # Automatically constructed
1154
+ dtype: ClassVar[str] = "list"
1155
+ pa_type: ClassVar[Any] = None
1156
+ _type: str = field(default="Sequence", init=False, repr=False)
1157
+
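# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# A Sequence with a dict feature is stored as a struct of lists (TFDS-style), as implemented
# by get_nested_type() and encode_nested_example() below:
#
#   seq = Sequence(feature={"token": Value("string"), "score": Value("float32")})
#   get_nested_type(seq)
#   # -> struct<token: list<item: string>, score: list<item: float>>
#   encode_nested_example(seq, [{"token": "a", "score": 1.0}, {"token": "b", "score": 2.0}])
#   # -> {"token": ["a", "b"], "score": [1.0, 2.0]}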
1158
+
1159
+ FeatureType = Union[
1160
+ dict,
1161
+ list,
1162
+ tuple,
1163
+ Value,
1164
+ ClassLabel,
1165
+ Translation,
1166
+ TranslationVariableLanguages,
1167
+ Sequence,
1168
+ Array2D,
1169
+ Array3D,
1170
+ Array4D,
1171
+ Array5D,
1172
+ Audio,
1173
+ Image,
1174
+ ]
1175
+
1176
+
1177
+ def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
1178
+ """
1179
+ Check if the object is not None.
1180
+ If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
1181
+ """
1182
+ if obj is None:
1183
+ return False
1184
+ elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
1185
+ if len(obj) > 0:
1186
+ if schema is None:
1187
+ pass
1188
+ elif isinstance(schema, (list, tuple)):
1189
+ schema = schema[0]
1190
+ else:
1191
+ schema = schema.feature
1192
+ return _check_non_null_non_empty_recursive(obj[0], schema)
1193
+ else:
1194
+ return False
1195
+ else:
1196
+ return True
1197
+
1198
+
1199
+ def get_nested_type(schema: FeatureType) -> pa.DataType:
1200
+ """
1201
+ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
1202
+ generate_from_arrow_type().
1203
+
1204
+ It performs double-duty as the implementation of Features.type and handles the conversion of
1205
+ datasets.Feature->pa.struct
1206
+ """
1207
+ # Nested structures: we allow dict, list/tuples, sequences
1208
+ if isinstance(schema, Features):
1209
+ return pa.struct(
1210
+ {key: get_nested_type(schema[key]) for key in schema}
1211
+ ) # Features is subclass of dict, and dict order is deterministic since Python 3.6
1212
+ elif isinstance(schema, dict):
1213
+ return pa.struct(
1214
+ {key: get_nested_type(schema[key]) for key in schema}
1215
+ ) # however don't sort on struct types since the order matters
1216
+ elif isinstance(schema, (list, tuple)):
1217
+ if len(schema) != 1:
1218
+ raise ValueError("When defining list feature, you should just provide one example of the inner type")
1219
+ value_type = get_nested_type(schema[0])
1220
+ return pa.list_(value_type)
1221
+ elif isinstance(schema, Sequence):
1222
+ value_type = get_nested_type(schema.feature)
1223
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
1224
+ if isinstance(schema.feature, dict):
1225
+ return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
1226
+ return pa.list_(value_type, schema.length)
1227
+
1228
+ # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
1229
+ return schema()
1230
+
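# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# get_nested_type() mirrors a FeatureType as a pyarrow DataType:
#
#   get_nested_type({"id": Value("int64"), "tokens": [Value("string")]})
#   # -> struct<id: int64, tokens: list<item: string>>
#   get_nested_type(Sequence(Value("int32"), length=3))
#   # -> fixed_size_list<item: int32>[3]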
1231
+
1232
+ def encode_nested_example(schema, obj, level=0):
1233
+ """Encode a nested example.
1234
+ This is used since some features (in particular ClassLabel) have some logic during encoding.
1235
+
1236
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
1237
+ If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
1238
+ """
1239
+ # Nested structures: we allow dict, list/tuples, sequences
1240
+ if isinstance(schema, dict):
1241
+ if level == 0 and obj is None:
1242
+ raise ValueError("Got None but expected a dictionary instead")
1243
+ return (
1244
+ {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
1245
+ if obj is not None
1246
+ else None
1247
+ )
1248
+
1249
+ elif isinstance(schema, (list, tuple)):
1250
+ sub_schema = schema[0]
1251
+ if obj is None:
1252
+ return None
1253
+ else:
1254
+ if len(obj) > 0:
1255
+ for first_elmt in obj:
1256
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
1257
+ break
1258
+ if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
1259
+ return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
1260
+ return list(obj)
1261
+ elif isinstance(schema, Sequence):
1262
+ if obj is None:
1263
+ return None
1264
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
1265
+ if isinstance(schema.feature, dict):
1266
+ # dict of list to fill
1267
+ list_dict = {}
1268
+ if isinstance(obj, (list, tuple)):
1269
+ # obj is a list of dict
1270
+ for k in schema.feature:
1271
+ list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
1272
+ return list_dict
1273
+ else:
1274
+ # obj is a single dict
1275
+ for k in schema.feature:
1276
+ list_dict[k] = (
1277
+ [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
1278
+ if k in obj
1279
+ else None
1280
+ )
1281
+ return list_dict
1282
+ # schema.feature is not a dict
1283
+ if isinstance(obj, str): # don't interpret a string as a list
1284
+ raise ValueError(f"Got a string but expected a list instead: '{obj}'")
1285
+ else:
1286
+ if len(obj) > 0:
1287
+ for first_elmt in obj:
1288
+ if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
1289
+ break
1290
+ # be careful when comparing tensors here
1291
+ if (
1292
+ not isinstance(first_elmt, list)
1293
+ or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
1294
+ ):
1295
+ return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
1296
+ return list(obj)
1297
+ # Object with special encoding:
1298
+ # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
1299
+ elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
1300
+ return schema.encode_example(obj) if obj is not None else None
1301
+ # Other objects should be directly convertible to a native Arrow type (like Translation)
1302
+ return obj
1303
+
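# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# Encoding only rewrites a list when its first non-null element actually changes under encoding:
#
#   schema = {"label": ClassLabel(names=["neg", "pos"]), "text": Value("string")}
#   encode_nested_example(schema, {"label": "pos", "text": "ok"})
#   # -> {"label": 1, "text": "ok"}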
1304
+
1305
+ def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
1306
+ """Decode a nested example.
1307
+ This is used since some features (in particular Audio and Image) have some logic during decoding.
1308
+
1309
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
1310
+ If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
1311
+ """
1312
+ # Nested structures: we allow dict, list/tuples, sequences
1313
+ if isinstance(schema, dict):
1314
+ return (
1315
+ {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
1316
+ if obj is not None
1317
+ else None
1318
+ )
1319
+ elif isinstance(schema, (list, tuple)):
1320
+ sub_schema = schema[0]
1321
+ if obj is None:
1322
+ return None
1323
+ else:
1324
+ if len(obj) > 0:
1325
+ for first_elmt in obj:
1326
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
1327
+ break
1328
+ if decode_nested_example(sub_schema, first_elmt) != first_elmt:
1329
+ return [decode_nested_example(sub_schema, o) for o in obj]
1330
+ return list(obj)
1331
+ elif isinstance(schema, Sequence):
1332
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
1333
+ if isinstance(schema.feature, dict):
1334
+ return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
1335
+ else:
1336
+ return decode_nested_example([schema.feature], obj)
1337
+ # Object with special decoding:
1338
+ elif isinstance(schema, (Audio, Image)):
1339
+ # we pass the token to read and decode files from private repositories in streaming mode
1340
+ if obj is not None and schema.decode:
1341
+ return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
1342
+ return obj
1343
+
1344
+
1345
+ _FEATURE_TYPES: Dict[str, FeatureType] = {
1346
+ Value.__name__: Value,
1347
+ ClassLabel.__name__: ClassLabel,
1348
+ Translation.__name__: Translation,
1349
+ TranslationVariableLanguages.__name__: TranslationVariableLanguages,
1350
+ Sequence.__name__: Sequence,
1351
+ Array2D.__name__: Array2D,
1352
+ Array3D.__name__: Array3D,
1353
+ Array4D.__name__: Array4D,
1354
+ Array5D.__name__: Array5D,
1355
+ Audio.__name__: Audio,
1356
+ Image.__name__: Image,
1357
+ }
1358
+
1359
+
1360
+ @experimental
1361
+ def register_feature(
1362
+ feature_cls: type,
1363
+ feature_type: str,
1364
+ ):
1365
+ """
1366
+ Register a Feature object using a name and class.
1367
+ This function must be used on a Feature class.
1368
+ """
1369
+ if feature_type in _FEATURE_TYPES:
1370
+ logger.warning(
1371
+ f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
1372
+ )
1373
+ _FEATURE_TYPES[feature_type] = feature_cls
1374
+
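# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# Registering a hypothetical custom feature class so that generate_from_dict() below can
# resolve its "_type" when deserializing:
#
#   @dataclass
#   class MyFeature:  # hypothetical user-defined feature
#       id: Optional[str] = None
#       _type: str = field(default="MyFeature", init=False, repr=False)
#
#   register_feature(MyFeature, "MyFeature")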
1375
+
1376
+ def generate_from_dict(obj: Any):
1377
+ """Regenerate the nested feature object from a deserialized dict.
1378
+ We use the '_type' fields to get the dataclass name to load.
1379
+
1380
+ generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
1381
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
1382
+ a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
1383
+ :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
1384
+ mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
1385
+ that :class:`Value` automatically performs.
1386
+ """
1387
+ # Nested structures: we allow dict, list/tuples, sequences
1388
+ if isinstance(obj, list):
1389
+ return [generate_from_dict(value) for value in obj]
1390
+ # Otherwise we have a dict or a dataclass
1391
+ if "_type" not in obj or isinstance(obj["_type"], dict):
1392
+ return {key: generate_from_dict(value) for key, value in obj.items()}
1393
+ obj = dict(obj)
1394
+ _type = obj.pop("_type")
1395
+ class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
1396
+
1397
+ if class_type is None:
1398
+ raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
1399
+
1400
+ if class_type == Sequence:
1401
+ return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
1402
+
1403
+ field_names = {f.name for f in fields(class_type)}
1404
+ return class_type(**{k: v for k, v in obj.items() if k in field_names})
1405
+
1406
+
1407
+ def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
1408
+ """
1409
+ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
1410
+ a single field.
1411
+
1412
+ This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
1413
+
1414
+ This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
1415
+ full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
1416
+ """
1417
+ if isinstance(pa_type, pa.StructType):
1418
+ return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
1419
+ elif isinstance(pa_type, pa.FixedSizeListType):
1420
+ return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
1421
+ elif isinstance(pa_type, pa.ListType):
1422
+ feature = generate_from_arrow_type(pa_type.value_type)
1423
+ if isinstance(feature, (dict, tuple, list)):
1424
+ return [feature]
1425
+ return Sequence(feature=feature)
1426
+ elif isinstance(pa_type, _ArrayXDExtensionType):
1427
+ array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
1428
+ return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
1429
+ elif isinstance(pa_type, pa.DictionaryType):
1430
+ raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
1431
+ elif isinstance(pa_type, pa.DataType):
1432
+ return Value(dtype=_arrow_to_datasets_dtype(pa_type))
1433
+ else:
1434
+ raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
1435
+
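# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# generate_from_arrow_type() and get_nested_type() invert each other:
#
#   generate_from_arrow_type(pa.list_(pa.int32()))           # -> Sequence(Value("int32"))
#   generate_from_arrow_type(pa.struct({"a": pa.string()}))  # -> {"a": Value("string")}
#   get_nested_type(generate_from_arrow_type(pa.int64())) == pa.int64()   # True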
1436
+
1437
+ def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
1438
+ """Build a PyArrow ListArray from a multidimensional NumPy array"""
1439
+ arr = np.array(arr)
1440
+ values = pa.array(arr.flatten(), type=type)
1441
+ for i in range(arr.ndim - 1):
1442
+ n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
1443
+ step_offsets = arr.shape[arr.ndim - i - 1]
1444
+ offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
1445
+ values = pa.ListArray.from_arrays(offsets, values)
1446
+ return values
1447
+
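# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# A (2, 3) NumPy array becomes a nested ListArray, with one level of offsets per extra dimension:
#
#   arr = np.arange(6).reshape(2, 3)
#   numpy_to_pyarrow_listarray(arr).to_pylist()   # -> [[0, 1, 2], [3, 4, 5]]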
1448
+
1449
+ def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
1450
+ null_mask = np.array([arr is None for arr in l_arr])
1451
+ null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
1452
+ l_arr = [arr for arr in l_arr if arr is not None]
1453
+ offsets = np.cumsum(
1454
+ [0] + [len(arr) for arr in l_arr], dtype=object
1455
+ ) # convert to dtype object to allow None insertion
1456
+ offsets = np.insert(offsets, null_indices, None)
1457
+ offsets = pa.array(offsets, type=pa.int32())
1458
+ values = pa.concat_arrays(l_arr)
1459
+ return pa.ListArray.from_arrays(offsets, values)
1460
+
1461
+
1462
+ def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
1463
+ """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
1464
+ if len(l_arr) > 0:
1465
+ return list_of_pa_arrays_to_pyarrow_listarray(
1466
+ [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
1467
+ )
1468
+ else:
1469
+ return pa.array([], type=type)
1470
+
1471
+
1472
+ def contains_any_np_array(data: Any):
1473
+ """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
1474
+
1475
+ Args:
1476
+ data (Any): Data.
1477
+
1478
+ Returns:
1479
+ bool
1480
+ """
1481
+ if isinstance(data, np.ndarray):
1482
+ return True
1483
+ elif isinstance(data, list):
1484
+ return contains_any_np_array(first_non_null_value(data)[1])
1485
+ else:
1486
+ return False
1487
+
1488
+
1489
+ def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
1490
+ """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
1491
+
1492
+ Args:
1493
+ data (Union[np.ndarray, List]): Data.
1494
+ type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
1495
+
1496
+ Returns:
1497
+ pa.ListArray
1498
+ """
1499
+ if isinstance(data, np.ndarray):
1500
+ return numpy_to_pyarrow_listarray(data, type=type)
1501
+ elif isinstance(data, list):
1502
+ return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
1503
+
1504
+
1505
+ def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
1506
+ """Convert to PyArrow ListArray.
1507
+
1508
+ Args:
1509
+ data (Any): Sequence, iterable, np.ndarray or pd.Series.
1510
+ pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
1511
+
1512
+ Returns:
1513
+ pyarrow.Array
1514
+ """
1515
+ if contains_any_np_array(data):
1516
+ return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
1517
+ else:
1518
+ return pa.array(data, pa_type.storage_dtype)
1519
+
1520
+
1521
+ def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
1522
+ """Visit a (possibly nested) feature.
1523
+
1524
+ Args:
1525
+ feature (FeatureType): the feature type to visit
+ func (Callable[[FeatureType], Optional[FeatureType]]): function applied to each nested feature; returning None keeps the feature unchanged
1526
+ Returns:
1527
+ visited feature (FeatureType)
1528
+ """
1529
+ if isinstance(feature, dict):
1530
+ out = func({k: _visit(f, func) for k, f in feature.items()})
1531
+ elif isinstance(feature, (list, tuple)):
1532
+ out = func([_visit(feature[0], func)])
1533
+ elif isinstance(feature, Sequence):
1534
+ out = func(Sequence(_visit(feature.feature, func), length=feature.length))
1535
+ else:
1536
+ out = func(feature)
1537
+ return feature if out is None else out
1538
+
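# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# _visit() applies a function to every nested feature; returning None keeps a feature unchanged:
#
#   def disable_image_decoding(feature):
#       return Image(decode=False) if isinstance(feature, Image) else None
#
#   _visit({"img": Image(), "meta": {"caption": Value("string")}}, disable_image_decoding)
#   # -> {"img": Image(decode=False, ...), "meta": {"caption": Value(dtype="string", ...)}}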
1539
+
1540
+ def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
1541
+ """Check if a (possibly nested) feature requires decoding.
1542
+
1543
+ Args:
1544
+ feature (FeatureType): the feature type to be checked
1545
+ ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
1546
+ of the `decode` attribute of the decodable feature types.
1547
+ Returns:
1548
+ :obj:`bool`
1549
+ """
1550
+ if isinstance(feature, dict):
1551
+ return any(require_decoding(f) for f in feature.values())
1552
+ elif isinstance(feature, (list, tuple)):
1553
+ return require_decoding(feature[0])
1554
+ elif isinstance(feature, Sequence):
1555
+ return require_decoding(feature.feature)
1556
+ else:
1557
+ return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
1558
+
1559
+
1560
+ def require_storage_cast(feature: FeatureType) -> bool:
1561
+ """Check if a (possibly nested) feature requires storage casting.
1562
+
1563
+ Args:
1564
+ feature (FeatureType): the feature type to be checked
1565
+ Returns:
1566
+ :obj:`bool`
1567
+ """
1568
+ if isinstance(feature, dict):
1569
+ return any(require_storage_cast(f) for f in feature.values())
1570
+ elif isinstance(feature, (list, tuple)):
1571
+ return require_storage_cast(feature[0])
1572
+ elif isinstance(feature, Sequence):
1573
+ return require_storage_cast(feature.feature)
1574
+ else:
1575
+ return hasattr(feature, "cast_storage")
1576
+
1577
+
1578
+ def require_storage_embed(feature: FeatureType) -> bool:
1579
+ """Check if a (possibly nested) feature requires embedding data into storage.
1580
+
1581
+ Args:
1582
+ feature (FeatureType): the feature type to be checked
1583
+ Returns:
1584
+ :obj:`bool`
1585
+ """
1586
+ if isinstance(feature, dict):
1587
+ return any(require_storage_cast(f) for f in feature.values())
1588
+ elif isinstance(feature, (list, tuple)):
1589
+ return require_storage_cast(feature[0])
1590
+ elif isinstance(feature, Sequence):
1591
+ return require_storage_cast(feature.feature)
1592
+ else:
1593
+ return hasattr(feature, "embed_storage")
1594
+
1595
+
1596
+ def keep_features_dicts_synced(func):
1597
+ """
1598
+ Wrapper that keeps the secondary dictionary of a :class:`datasets.Features` object (the one tracking which columns require decoding)
1599
+ in sync with the main dictionary.
1600
+ """
1601
+
1602
+ @wraps(func)
1603
+ def wrapper(*args, **kwargs):
1604
+ if args:
1605
+ self: "Features" = args[0]
1606
+ args = args[1:]
1607
+ else:
1608
+ self: "Features" = kwargs.pop("self")
1609
+ out = func(self, *args, **kwargs)
1610
+ assert hasattr(self, "_column_requires_decoding")
1611
+ self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
1612
+ return out
1613
+
1614
+ wrapper._decorator_name_ = "_keep_dicts_synced"
1615
+ return wrapper
1616
+
1617
+
1618
+ class Features(dict):
1619
+ """A special dictionary that defines the internal structure of a dataset.
1620
+
1621
+ Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
1622
+ and values are the type of that column.
1623
+
1624
+ `FieldType` can be one of the following:
1625
+ - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
1626
+ - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
1627
+ associated to them and will be stored as integers in the dataset.
1628
+ - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
1629
+ features. It's possible to have nested fields of nested fields in an arbitrary manner.
1630
+ - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
1631
+ `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
1632
+ type hosted in this list.
1633
+
1634
+ <Tip>
1635
+
1636
+ A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
1637
+ lists. This behavior is implemented to provide a compatibility layer with the TensorFlow Datasets library but may be
1638
+ unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
1639
+ [`~datasets.Sequence`].
1640
+
1641
+ </Tip>
1642
+
1643
+ - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
1644
+ - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
1645
+ to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
1646
+ - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
1647
+ or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
1648
+ - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
1649
+ """
1650
+
1651
+ def __init__(*args, **kwargs):
1652
+ # self not in the signature to allow passing self as a kwarg
1653
+ if not args:
1654
+ raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
1655
+ self, *args = args
1656
+ super(Features, self).__init__(*args, **kwargs)
1657
+ self._column_requires_decoding: Dict[str, bool] = {
1658
+ col: require_decoding(feature) for col, feature in self.items()
1659
+ }
1660
+
1661
+ __setitem__ = keep_features_dicts_synced(dict.__setitem__)
1662
+ __delitem__ = keep_features_dicts_synced(dict.__delitem__)
1663
+ update = keep_features_dicts_synced(dict.update)
1664
+ setdefault = keep_features_dicts_synced(dict.setdefault)
1665
+ pop = keep_features_dicts_synced(dict.pop)
1666
+ popitem = keep_features_dicts_synced(dict.popitem)
1667
+ clear = keep_features_dicts_synced(dict.clear)
1668
+
1669
+ def __reduce__(self):
1670
+ return Features, (dict(self),)
1671
+
1672
+ @property
1673
+ def type(self):
1674
+ """
1675
+ Features field types.
1676
+
1677
+ Returns:
1678
+ :obj:`pyarrow.DataType`
1679
+ """
1680
+ return get_nested_type(self)
1681
+
1682
+ @property
1683
+ def arrow_schema(self):
1684
+ """
1685
+ Features schema.
1686
+
1687
+ Returns:
1688
+ :obj:`pyarrow.Schema`
1689
+ """
1690
+ hf_metadata = {"info": {"features": self.to_dict()}}
1691
+ return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
1692
+
1693
+ @classmethod
1694
+ def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
1695
+ """
1696
+ Construct [`Features`] from Arrow Schema.
1697
+ It also checks the schema metadata for Hugging Face Datasets features.
1698
+ Non-nullable fields are not supported and set to nullable.
1699
+
1700
+ Args:
1701
+ pa_schema (`pyarrow.Schema`):
1702
+ Arrow Schema.
1703
+
1704
+ Returns:
1705
+ [`Features`]
1706
+ """
1707
+ # try to load features from the arrow schema metadata
1708
+ metadata_features = Features()
1709
+ if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
1710
+ metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
1711
+ if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
1712
+ metadata_features = Features.from_dict(metadata["info"]["features"])
1713
+ metadata_features_schema = metadata_features.arrow_schema
1714
+ obj = {
1715
+ field.name: (
1716
+ metadata_features[field.name]
1717
+ if field.name in metadata_features and metadata_features_schema.field(field.name) == field
1718
+ else generate_from_arrow_type(field.type)
1719
+ )
1720
+ for field in pa_schema
1721
+ }
1722
+ return cls(**obj)
1723
+
1724
+ @classmethod
1725
+ def from_dict(cls, dic) -> "Features":
1726
+ """
1727
+ Construct [`Features`] from dict.
1728
+
1729
+ Regenerate the nested feature object from a deserialized dict.
1730
+ We use the `_type` key to infer the dataclass name of the feature `FieldType`.
1731
+
1732
+ It allows for a convenient constructor syntax
1733
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
1734
+ a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
1735
+ [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
1736
+ any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
1737
+ dtypes that [`Value`] automatically performs.
1738
+
1739
+ Args:
1740
+ dic (`dict[str, Any]`):
1741
+ Python dictionary.
1742
+
1743
+ Returns:
1744
+ `Features`
1745
+
1746
+ Example::
1747
+ >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
1748
+ {'_type': Value(dtype='string', id=None)}
1749
+ """
1750
+ obj = generate_from_dict(dic)
1751
+ return cls(**obj)
1752
+
1753
+ def to_dict(self):
1754
+ return asdict(self)
1755
+
1756
+ def _to_yaml_list(self) -> list:
1757
+ # we compute the YAML list from the dict representation that is used for JSON dump
1758
+ yaml_data = self.to_dict()
1759
+
1760
+ def simplify(feature: dict) -> dict:
1761
+ if not isinstance(feature, dict):
1762
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
1763
+
1764
+ #
1765
+ # sequence: -> sequence: int32
1766
+ # dtype: int32 ->
1767
+ #
1768
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
1769
+ feature["sequence"] = feature["sequence"]["dtype"]
1770
+
1771
+ #
1772
+ # sequence: -> sequence:
1773
+ # struct: -> - name: foo
1774
+ # - name: foo -> dtype: int32
1775
+ # dtype: int32 ->
1776
+ #
1777
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
1778
+ feature["sequence"] = feature["sequence"]["struct"]
1779
+
1780
+ #
1781
+ # list: -> list: int32
1782
+ # dtype: int32 ->
1783
+ #
1784
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
1785
+ feature["list"] = feature["list"]["dtype"]
1786
+
1787
+ #
1788
+ # list: -> list:
1789
+ # struct: -> - name: foo
1790
+ # - name: foo -> dtype: int32
1791
+ # dtype: int32 ->
1792
+ #
1793
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
1794
+ feature["list"] = feature["list"]["struct"]
1795
+
1796
+ #
1797
+ # class_label: -> class_label:
1798
+ # names: -> names:
1799
+ # - negative -> '0': negative
1800
+ # - positive -> '1': positive
1801
+ #
1802
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
1803
+ # server-side requirement: keys must be strings
1804
+ feature["class_label"]["names"] = {
1805
+ str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
1806
+ }
1807
+ return feature
1808
+
1809
+ def to_yaml_inner(obj: Union[dict, list]) -> dict:
1810
+ if isinstance(obj, dict):
1811
+ _type = obj.pop("_type", None)
1812
+ if _type == "Sequence":
1813
+ _feature = obj.pop("feature")
1814
+ return simplify({"sequence": to_yaml_inner(_feature), **obj})
1815
+ elif _type == "Value":
1816
+ return obj
1817
+ elif _type and not obj:
1818
+ return {"dtype": camelcase_to_snakecase(_type)}
1819
+ elif _type:
1820
+ return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
1821
+ else:
1822
+ return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
1823
+ elif isinstance(obj, list):
1824
+ return simplify({"list": simplify(to_yaml_inner(obj[0]))})
1825
+ elif isinstance(obj, tuple):
1826
+ return to_yaml_inner(list(obj))
1827
+ else:
1828
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
1829
+
1830
+ def to_yaml_types(obj: dict) -> dict:
1831
+ if isinstance(obj, dict):
1832
+ return {k: to_yaml_types(v) for k, v in obj.items()}
1833
+ elif isinstance(obj, list):
1834
+ return [to_yaml_types(v) for v in obj]
1835
+ elif isinstance(obj, tuple):
1836
+ return to_yaml_types(list(obj))
1837
+ else:
1838
+ return obj
1839
+
1840
+ return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
1841
+
1842
+ @classmethod
1843
+ def _from_yaml_list(cls, yaml_data: list) -> "Features":
1844
+ yaml_data = copy.deepcopy(yaml_data)
1845
+
1846
+ # we convert the list obtained from YAML data into the dict representation that is used for JSON dump
1847
+
1848
+ def unsimplify(feature: dict) -> dict:
1849
+ if not isinstance(feature, dict):
1850
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
1851
+ #
1852
+ # sequence: int32 -> sequence:
1853
+ # -> dtype: int32
1854
+ #
1855
+ if isinstance(feature.get("sequence"), str):
1856
+ feature["sequence"] = {"dtype": feature["sequence"]}
1857
+ #
1858
+ # list: int32 -> list:
1859
+ # -> dtype: int32
1860
+ #
1861
+ if isinstance(feature.get("list"), str):
1862
+ feature["list"] = {"dtype": feature["list"]}
1863
+
1864
+ #
1865
+ # class_label: -> class_label:
1866
+ # names: -> names:
1867
+ # '0': negative -> - negative
1868
+ # '1': positive -> - positive
1869
+ #
1870
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
1871
+ label_ids = sorted(feature["class_label"]["names"], key=int)
1872
+ if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
1873
+ raise ValueError(
1874
+ f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
1875
+ )
1876
+ feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
1877
+ return feature
1878
+
1879
+ def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
1880
+ if isinstance(obj, dict):
1881
+ if not obj:
1882
+ return {}
1883
+ _type = next(iter(obj))
1884
+ if _type == "sequence":
1885
+ _feature = unsimplify(obj).pop(_type)
1886
+ return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
1887
+ if _type == "list":
1888
+ return [from_yaml_inner(unsimplify(obj)[_type])]
1889
+ if _type == "struct":
1890
+ return from_yaml_inner(obj["struct"])
1891
+ elif _type == "dtype":
1892
+ if isinstance(obj["dtype"], str):
1893
+ # e.g. int32, float64, string, audio, image
1894
+ try:
1895
+ Value(obj["dtype"])
1896
+ return {**obj, "_type": "Value"}
1897
+ except ValueError:
1898
+ # e.g. Audio, Image, ArrayXD
1899
+ return {"_type": snakecase_to_camelcase(obj["dtype"])}
1900
+ else:
1901
+ return from_yaml_inner(obj["dtype"])
1902
+ else:
1903
+ return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
1904
+ elif isinstance(obj, list):
1905
+ names = [_feature.pop("name") for _feature in obj]
1906
+ return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
1907
+ else:
1908
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
1909
+
1910
+ return cls.from_dict(from_yaml_inner(yaml_data))
1911
+
1912
+ def encode_example(self, example):
1913
+ """
1914
+ Encode example into a format for Arrow.
1915
+
1916
+ Args:
1917
+ example (`dict[str, Any]`):
1918
+ Data in a Dataset row.
1919
+
1920
+ Returns:
1921
+ `dict[str, Any]`
1922
+ """
1923
+ example = cast_to_python_objects(example)
1924
+ return encode_nested_example(self, example)
1925
+
1926
+ def encode_column(self, column, column_name: str):
1927
+ """
1928
+ Encode column into a format for Arrow.
1929
+
1930
+ Args:
1931
+ column (`list[Any]`):
1932
+ Data in a Dataset column.
1933
+ column_name (`str`):
1934
+ Dataset column name.
1935
+
1936
+ Returns:
1937
+ `list[Any]`
1938
+ """
1939
+ column = cast_to_python_objects(column)
1940
+ return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
1941
+
1942
+ def encode_batch(self, batch):
1943
+ """
1944
+ Encode batch into a format for Arrow.
1945
+
1946
+ Args:
1947
+ batch (`dict[str, list[Any]]`):
1948
+ Data in a Dataset batch.
1949
+
1950
+ Returns:
1951
+ `dict[str, list[Any]]`
1952
+ """
1953
+ encoded_batch = {}
1954
+ if set(batch) != set(self):
1955
+ raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
1956
+ for key, column in batch.items():
1957
+ column = cast_to_python_objects(column)
1958
+ encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
1959
+ return encoded_batch
1960
+
1961
+ def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
1962
+ """Decode example with custom feature decoding.
1963
+
1964
+ Args:
1965
+ example (`dict[str, Any]`):
1966
+ Dataset row data.
1967
+ token_per_repo_id (`dict`, *optional*):
1968
+ To access and decode audio or image files from private repositories on the Hub, you can pass
1969
+ a dictionary `repo_id (str) -> token (bool or str)`.
1970
+
1971
+ Returns:
1972
+ `dict[str, Any]`
1973
+ """
1974
+
1975
+ return {
1976
+ column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
1977
+ if self._column_requires_decoding[column_name]
1978
+ else value
1979
+ for column_name, (feature, value) in zip_dict(
1980
+ {key: value for key, value in self.items() if key in example}, example
1981
+ )
1982
+ }
1983
+
1984
+ def decode_column(self, column: list, column_name: str):
1985
+ """Decode column with custom feature decoding.
1986
+
1987
+ Args:
1988
+ column (`list[Any]`):
1989
+ Dataset column data.
1990
+ column_name (`str`):
1991
+ Dataset column name.
1992
+
1993
+ Returns:
1994
+ `list[Any]`
1995
+ """
1996
+ return (
1997
+ [decode_nested_example(self[column_name], value) if value is not None else None for value in column]
1998
+ if self._column_requires_decoding[column_name]
1999
+ else column
2000
+ )
2001
+
2002
+ def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
2003
+ """Decode batch with custom feature decoding.
2004
+
2005
+ Args:
2006
+ batch (`dict[str, list[Any]]`):
2007
+ Dataset batch data.
2008
+ token_per_repo_id (`dict`, *optional*):
2009
+ To access and decode audio or image files from private repositories on the Hub, you can pass
2010
+ a dictionary repo_id (str) -> token (bool or str)
2011
+
2012
+ Returns:
2013
+ `dict[str, list[Any]]`
2014
+ """
2015
+ decoded_batch = {}
2016
+ for column_name, column in batch.items():
2017
+ decoded_batch[column_name] = (
2018
+ [
2019
+ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
2020
+ if value is not None
2021
+ else None
2022
+ for value in column
2023
+ ]
2024
+ if self._column_requires_decoding[column_name]
2025
+ else column
2026
+ )
2027
+ return decoded_batch
2028
+
2029
+ def copy(self) -> "Features":
2030
+ """
2031
+ Make a deep copy of [`Features`].
2032
+
2033
+ Returns:
2034
+ [`Features`]
2035
+
2036
+ Example:
2037
+
2038
+ ```py
2039
+ >>> from datasets import load_dataset
2040
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
2041
+ >>> copy_of_features = ds.features.copy()
2042
+ >>> copy_of_features
2043
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
2044
+ 'text': Value(dtype='string', id=None)}
2045
+ ```
2046
+ """
2047
+ return copy.deepcopy(self)
2048
+
2049
+ def reorder_fields_as(self, other: "Features") -> "Features":
2050
+ """
2051
+ Reorder Features fields to match the field order of other [`Features`].
2052
+
2053
+ The order of the fields is important since it matters for the underlying arrow data.
2054
+ Re-ordering the fields allows the underlying arrow data types to match.
2055
+
2056
+ Args:
2057
+ other ([`Features`]):
2058
+ The other [`Features`] to align with.
2059
+
2060
+ Returns:
2061
+ [`Features`]
2062
+
2063
+ Example::
2064
+
2065
+ >>> from datasets import Features, Sequence, Value
2066
+ >>> # let's say we have two features with a different order of nested fields (for a and b for example)
2067
+ >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
2068
+ >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
2069
+ >>> assert f1.type != f2.type
2070
+ >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
2071
+ >>> f1.reorder_fields_as(f2)
2072
+ {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
2073
+ >>> assert f1.reorder_fields_as(f2).type == f2.type
2074
+ """
2075
+
2076
+ def recursive_reorder(source, target, stack=""):
2077
+ stack_position = " at " + stack[1:] if stack else ""
2078
+ if isinstance(target, Sequence):
2079
+ target = target.feature
2080
+ if isinstance(target, dict):
2081
+ target = {k: [v] for k, v in target.items()}
2082
+ else:
2083
+ target = [target]
2084
+ if isinstance(source, Sequence):
2085
+ source, id_, length = source.feature, source.id, source.length
2086
+ if isinstance(source, dict):
2087
+ source = {k: [v] for k, v in source.items()}
2088
+ reordered = recursive_reorder(source, target, stack)
2089
+ return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
2090
+ else:
2091
+ source = [source]
2092
+ reordered = recursive_reorder(source, target, stack)
2093
+ return Sequence(reordered[0], id=id_, length=length)
2094
+ elif isinstance(source, dict):
2095
+ if not isinstance(target, dict):
2096
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
2097
+ if sorted(source) != sorted(target):
2098
+ message = (
2099
+ f"Keys mismatch: between {source} (source) and {target} (target).\n"
2100
+ f"{source.keys()-target.keys()} are missing from target "
2101
+ f"and {target.keys()-source.keys()} are missing from source" + stack_position
2102
+ )
2103
+ raise ValueError(message)
2104
+ return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
2105
+ elif isinstance(source, list):
2106
+ if not isinstance(target, list):
2107
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
2108
+ if len(source) != len(target):
2109
+ raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
2110
+ return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
2111
+ else:
2112
+ return source
2113
+
2114
+ return Features(recursive_reorder(self, other))
2115
+
2116
+ def flatten(self, max_depth=16) -> "Features":
2117
+ """Flatten the features. Every dictionary column is removed and is replaced by
2118
+ all the subfields it contains. The new fields are named by concatenating the
2119
+ name of the original column and the subfield name like this: `<original>.<subfield>`.
2120
+
2121
+ If a column contains nested dictionaries, then all the lower-level subfields names are
2122
+ also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
2123
+
2124
+ Returns:
2125
+ [`Features`]:
2126
+ The flattened features.
2127
+
2128
+ Example:
2129
+
2130
+ ```py
2131
+ >>> from datasets import load_dataset
2132
+ >>> ds = load_dataset("squad", split="train")
2133
+ >>> ds.features.flatten()
2134
+ {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
2135
+ 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
2136
+ 'context': Value(dtype='string', id=None),
2137
+ 'id': Value(dtype='string', id=None),
2138
+ 'question': Value(dtype='string', id=None),
2139
+ 'title': Value(dtype='string', id=None)}
2140
+ ```
2141
+ """
2142
+ for depth in range(1, max_depth):
2143
+ no_change = True
2144
+ flattened = self.copy()
2145
+ for column_name, subfeature in self.items():
2146
+ if isinstance(subfeature, dict):
2147
+ no_change = False
2148
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
2149
+ del flattened[column_name]
2150
+ elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
2151
+ no_change = False
2152
+ flattened.update(
2153
+ {
2154
+ f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
2155
+ for k, v in subfeature.feature.items()
2156
+ }
2157
+ )
2158
+ del flattened[column_name]
2159
+ elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
2160
+ no_change = False
2161
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
2162
+ del flattened[column_name]
2163
+ self = flattened
2164
+ if no_change:
2165
+ break
2166
+ return self
2167
+
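# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# A typical Features round trip using only the methods defined above:
#
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
#   features.arrow_schema                                     # pyarrow.Schema carrying "huggingface" metadata
#   features.encode_example({"text": "ok", "label": "pos"})   # -> {"text": "ok", "label": 1}
#   Features.from_dict(features.to_dict()) == features        # -> True (serialization round trip)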
2168
+
2169
+ def _align_features(features_list: List[Features]) -> List[Features]:
2170
+ """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
2171
+ name2feature = {}
2172
+ for features in features_list:
2173
+ for k, v in features.items():
2174
+ if k in name2feature and isinstance(v, dict):
2175
+ # Recursively align features.
2176
+ name2feature[k] = _align_features([name2feature[k], v])[0]
2177
+ elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
2178
+ name2feature[k] = v
2179
+
2180
+ return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
2181
+
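# --- Illustrative sketch (editorial addition, not part of the upstream source) ---
# Columns whose only known type is Value("null") get aligned to the typed feature found elsewhere:
#
#   f1 = Features({"a": Value("null"), "b": Value("string")})
#   f2 = Features({"a": Value("int64")})
#   _align_features([f1, f2])
#   # -> [Features({"a": Value("int64"), "b": Value("string")}), Features({"a": Value("int64")})]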
2182
+
2183
+ def _check_if_features_can_be_aligned(features_list: List[Features]):
2184
+ """Check if the dictionaries of features can be aligned.
2185
+
2186
+ Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
2187
+ """
2188
+ name2feature = {}
2189
+ for features in features_list:
2190
+ for k, v in features.items():
2191
+ if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
2192
+ name2feature[k] = v
2193
+
2194
+ for features in features_list:
2195
+ for k, v in features.items():
2196
+ if isinstance(v, dict) and isinstance(name2feature[k], dict):
2197
+ # Deep checks for structure.
2198
+ _check_if_features_can_be_aligned([name2feature[k], v])
2199
+ elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
2200
+ raise ValueError(
2201
+ f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
2202
+ )
venv/lib/python3.10/site-packages/datasets/features/image.py ADDED
@@ -0,0 +1,383 @@
1
+ import os
2
+ import sys
3
+ import warnings
4
+ from dataclasses import dataclass, field
5
+ from io import BytesIO
6
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
7
+
8
+ import numpy as np
9
+ import pyarrow as pa
10
+
11
+ from .. import config
12
+ from ..download.download_config import DownloadConfig
13
+ from ..table import array_cast
14
+ from ..utils.file_utils import is_local_path, xopen
15
+ from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
16
+
17
+
18
+ if TYPE_CHECKING:
19
+ import PIL.Image
20
+
21
+ from .features import FeatureType
22
+
23
+
24
+ _IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
25
+ _NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
26
+ # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
27
+ _VALID_IMAGE_ARRAY_DTPYES = [
28
+ np.dtype("|b1"),
29
+ np.dtype("|u1"),
30
+ np.dtype("<u2"),
31
+ np.dtype(">u2"),
32
+ np.dtype("<i2"),
33
+ np.dtype(">i2"),
34
+ np.dtype("<u4"),
35
+ np.dtype(">u4"),
36
+ np.dtype("<i4"),
37
+ np.dtype(">i4"),
38
+ np.dtype("<f4"),
39
+ np.dtype(">f4"),
40
+ np.dtype("<f8"),
41
+ np.dtype(">f8"),
42
+ ]
43
+
44
+
45
+ @dataclass
46
+ class Image:
47
+ """Image [`Feature`] to read image data from an image file.
48
+
49
+ Input: The Image feature accepts as input:
50
+ - A `str`: Absolute path to the image file (i.e. random access is allowed).
51
+ - A `dict` with the keys:
52
+
53
+ - `path`: String with the relative path of the image file inside the archive.
54
+ - `bytes`: Bytes of the image file.
55
+
56
+ This is useful for archived files with sequential access.
57
+
58
+ - An `np.ndarray`: NumPy array representing an image.
59
+ - A `PIL.Image.Image`: PIL image object.
60
+
61
+ Args:
62
+ mode (`str`, *optional*):
63
+ The mode to convert the image to. If `None`, the native mode of the image is used.
64
+ decode (`bool`, defaults to `True`):
65
+ Whether to decode the image data. If `False`,
66
+ returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
67
+
68
+ Examples:
69
+
70
+ ```py
71
+ >>> from datasets import load_dataset, Image
72
+ >>> ds = load_dataset("beans", split="train")
73
+ >>> ds.features["image"]
74
+ Image(decode=True, id=None)
75
+ >>> ds[0]["image"]
76
+ <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
77
+ >>> ds = ds.cast_column('image', Image(decode=False))
+ >>> ds[0]["image"]
78
+ {'bytes': None,
79
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
80
+ ```
81
+ """
82
+
83
+ mode: Optional[str] = None
84
+ decode: bool = True
85
+ id: Optional[str] = None
86
+ # Automatically constructed
87
+ dtype: ClassVar[str] = "PIL.Image.Image"
88
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
89
+ _type: str = field(default="Image", init=False, repr=False)
90
+
91
+ def __call__(self):
92
+ return self.pa_type
93
+
94
+ def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
95
+ """Encode example into a format for Arrow.
96
+
97
+ Args:
98
+ value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
99
+ Data passed as input to Image feature.
100
+
101
+ Returns:
102
+ `dict` with "path" and "bytes" fields
103
+ """
104
+ if config.PIL_AVAILABLE:
105
+ import PIL.Image
106
+ else:
107
+ raise ImportError("To support encoding images, please install 'Pillow'.")
108
+
109
+ if isinstance(value, list):
110
+ value = np.array(value)
111
+
112
+ if isinstance(value, str):
113
+ return {"path": value, "bytes": None}
114
+ elif isinstance(value, bytes):
115
+ return {"path": None, "bytes": value}
116
+ elif isinstance(value, np.ndarray):
117
+ # convert the image array to PNG/TIFF bytes
118
+ return encode_np_array(value)
119
+ elif isinstance(value, PIL.Image.Image):
120
+ # convert the PIL image to bytes (default format is PNG/TIFF)
121
+ return encode_pil_image(value)
122
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
123
+ # we set "bytes": None to not duplicate the data if they're already available locally
124
+ return {"bytes": None, "path": value.get("path")}
125
+ elif value.get("bytes") is not None or value.get("path") is not None:
126
+ # store the image bytes, and path is used to infer the image format using the file extension
127
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
128
+ else:
129
+ raise ValueError(
130
+ f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
131
+ )
132
+
133
+ def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
134
+ """Decode example image file into image data.
135
+
136
+ Args:
137
+ value (`str` or `dict`):
138
+ A string with the absolute image file path, a dictionary with
139
+ keys:
140
+
141
+ - `path`: String with absolute or relative image file path.
142
+ - `bytes`: The bytes of the image file.
143
+ token_per_repo_id (`dict`, *optional*):
144
+ To access and decode
145
+ image files from private repositories on the Hub, you can pass
146
+ a dictionary repo_id (`str`) -> token (`bool` or `str`).
147
+
148
+ Returns:
149
+ `PIL.Image.Image`
150
+ """
151
+ if not self.decode:
152
+ raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
153
+
154
+ if config.PIL_AVAILABLE:
155
+ import PIL.Image
156
+ import PIL.ImageOps
157
+ else:
158
+ raise ImportError("To support decoding images, please install 'Pillow'.")
159
+
160
+ if token_per_repo_id is None:
161
+ token_per_repo_id = {}
162
+
163
+ path, bytes_ = value["path"], value["bytes"]
164
+ if bytes_ is None:
165
+ if path is None:
166
+ raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
167
+ else:
168
+ if is_local_path(path):
169
+ image = PIL.Image.open(path)
170
+ else:
171
+ source_url = path.split("::")[-1]
172
+ pattern = (
173
+ config.HUB_DATASETS_URL
174
+ if source_url.startswith(config.HF_ENDPOINT)
175
+ else config.HUB_DATASETS_HFFS_URL
176
+ )
177
+ try:
178
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
179
+ token = token_per_repo_id.get(repo_id)
180
+ except ValueError:
181
+ token = None
182
+ download_config = DownloadConfig(token=token)
183
+ with xopen(path, "rb", download_config=download_config) as f:
184
+ bytes_ = BytesIO(f.read())
185
+ image = PIL.Image.open(bytes_)
186
+ else:
187
+ image = PIL.Image.open(BytesIO(bytes_))
188
+ image.load() # to avoid "Too many open files" errors
189
+ if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
190
+ image = PIL.ImageOps.exif_transpose(image)
191
+ if self.mode and self.mode != image.mode:
192
+ image = image.convert(self.mode)
193
+ return image
194
+
195
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
196
+ """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
197
+ from .features import Value
198
+
199
+ return (
200
+ self
201
+ if self.decode
202
+ else {
203
+ "bytes": Value("binary"),
204
+ "path": Value("string"),
205
+ }
206
+ )
207
+
208
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
209
+ """Cast an Arrow array to the Image arrow storage type.
210
+ The Arrow types that can be converted to the Image pyarrow storage type are:
211
+
212
+ - `pa.string()` - it must contain the "path" data
213
+ - `pa.binary()` - it must contain the image bytes
214
+ - `pa.struct({"bytes": pa.binary()})`
215
+ - `pa.struct({"path": pa.string()})`
216
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
217
+ - `pa.list(*)` - it must contain the image array data
218
+
219
+ Args:
220
+ storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
221
+ PyArrow array to cast.
222
+
223
+ Returns:
224
+ `pa.StructArray`: Array in the Image arrow storage type, that is
225
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
226
+ """
227
+ if pa.types.is_string(storage.type):
228
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
229
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
230
+ elif pa.types.is_binary(storage.type):
231
+ path_array = pa.array([None] * len(storage), type=pa.string())
232
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
233
+ elif pa.types.is_struct(storage.type):
234
+ if storage.type.get_field_index("bytes") >= 0:
235
+ bytes_array = storage.field("bytes")
236
+ else:
237
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
238
+ if storage.type.get_field_index("path") >= 0:
239
+ path_array = storage.field("path")
240
+ else:
241
+ path_array = pa.array([None] * len(storage), type=pa.string())
242
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
243
+ elif pa.types.is_list(storage.type):
244
+ bytes_array = pa.array(
245
+ [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
246
+ type=pa.binary(),
247
+ )
248
+ path_array = pa.array([None] * len(storage), type=pa.string())
249
+ storage = pa.StructArray.from_arrays(
250
+ [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
251
+ )
252
+ return array_cast(storage, self.pa_type)
253
+
254
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
255
+ """Embed image files into the Arrow array.
256
+
257
+ Args:
258
+ storage (`pa.StructArray`):
259
+ PyArrow array to embed.
260
+
261
+ Returns:
262
+ `pa.StructArray`: Array in the Image arrow storage type, that is
263
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
264
+ """
265
+
266
+ @no_op_if_value_is_null
267
+ def path_to_bytes(path):
268
+ with xopen(path, "rb") as f:
269
+ bytes_ = f.read()
270
+ return bytes_
271
+
272
+ bytes_array = pa.array(
273
+ [
274
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
275
+ for x in storage.to_pylist()
276
+ ],
277
+ type=pa.binary(),
278
+ )
279
+ path_array = pa.array(
280
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
281
+ type=pa.string(),
282
+ )
283
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
284
+ return array_cast(storage, self.pa_type)
285
+
286
+
287
+ def list_image_compression_formats() -> List[str]:
288
+ if config.PIL_AVAILABLE:
289
+ import PIL.Image
290
+ else:
291
+ raise ImportError("To support encoding images, please install 'Pillow'.")
292
+
293
+ global _IMAGE_COMPRESSION_FORMATS
294
+ if _IMAGE_COMPRESSION_FORMATS is None:
295
+ PIL.Image.init()
296
+ _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
297
+ return _IMAGE_COMPRESSION_FORMATS
298
+
299
+
300
+ def image_to_bytes(image: "PIL.Image.Image") -> bytes:
301
+ """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
302
+ buffer = BytesIO()
303
+ if image.format in list_image_compression_formats():
304
+ format = image.format
305
+ else:
306
+ format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
307
+ image.save(buffer, format=format)
308
+ return buffer.getvalue()
309
+
310
+
311
+ def encode_pil_image(image: "PIL.Image.Image") -> dict:
312
+ if hasattr(image, "filename") and image.filename != "":
313
+ return {"path": image.filename, "bytes": None}
314
+ else:
315
+ return {"path": None, "bytes": image_to_bytes(image)}
316
+
317
+
318
+ def encode_np_array(array: np.ndarray) -> dict:
319
+ if config.PIL_AVAILABLE:
320
+ import PIL.Image
321
+ else:
322
+ raise ImportError("To support encoding images, please install 'Pillow'.")
323
+
324
+ dtype = array.dtype
325
+ dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
326
+ dtype_kind = dtype.kind
327
+ dtype_itemsize = dtype.itemsize
328
+
329
+ dest_dtype = None
330
+
331
+ # Multi-channel array case (only np.dtype("|u1") is allowed)
332
+ if array.shape[2:]:
333
+ if dtype_kind not in ["u", "i"]:
334
+ raise TypeError(
335
+ f"Unsupported array dtype {dtype} for image encoding. Only np.dtype('|u1') is supported for multi-channel arrays."
336
+ )
337
+ dest_dtype = np.dtype("|u1")
338
+ if dtype != dest_dtype:
339
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
340
+ # Exact match
341
+ elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
342
+ dest_dtype = dtype
343
+ else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
344
+ while dtype_itemsize >= 1:
345
+ dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
346
+ if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
347
+ dest_dtype = np.dtype(dtype_str)
348
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
349
+ break
350
+ else:
351
+ dtype_itemsize //= 2
352
+ if dest_dtype is None:
353
+ raise TypeError(
354
+ f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
355
+ )
356
+
357
+ image = PIL.Image.fromarray(array.astype(dest_dtype))
358
+ return {"path": None, "bytes": image_to_bytes(image)}
359
+
360
+
361
+ def objects_to_list_of_image_dicts(
362
+ objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
363
+ ) -> List[dict]:
364
+ """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
365
+ if config.PIL_AVAILABLE:
366
+ import PIL.Image
367
+ else:
368
+ raise ImportError("To support encoding images, please install 'Pillow'.")
369
+
370
+ if objs:
371
+ _, obj = first_non_null_value(objs)
372
+ if isinstance(obj, str):
373
+ return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
374
+ if isinstance(obj, np.ndarray):
375
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
376
+ return [obj_to_image_dict_func(obj) for obj in objs]
377
+ elif isinstance(obj, PIL.Image.Image):
378
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
379
+ return [obj_to_image_dict_func(obj) for obj in objs]
380
+ else:
381
+ return objs
382
+ else:
383
+ return objs
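The `Image` feature above stores each example as a `{"bytes", "path"}` struct in Arrow and only materializes a `PIL.Image.Image` on access. A minimal round-trip sketch (not part of this commit; it assumes Pillow and numpy are installed):

```python
import numpy as np
from datasets.features import Image

feature = Image()

# encode_example converts a uint8 array to PNG bytes inside the {"bytes", "path"} struct
arr = np.zeros((32, 32, 3), dtype=np.uint8)
encoded = feature.encode_example(arr)
assert encoded["path"] is None and isinstance(encoded["bytes"], bytes)

# decode_example re-opens those bytes as a PIL image
pil_image = feature.decode_example(encoded)
print(pil_image.size, pil_image.mode)  # (32, 32) RGB
```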
venv/lib/python3.10/site-packages/datasets/features/translation.py ADDED
@@ -0,0 +1,129 @@
1
+ from dataclasses import dataclass, field
2
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
3
+
4
+ import pyarrow as pa
5
+
6
+
7
+ if TYPE_CHECKING:
8
+ from .features import FeatureType
9
+
10
+
11
+ @dataclass
12
+ class Translation:
13
+ """`FeatureConnector` for translations with fixed languages per example.
14
+ Here for compatibility with tfds.
15
+
16
+ Args:
17
+ languages (`List[str]`):
+ The fixed list of language codes; each example is a dictionary mapping these codes to string translations.
19
+
20
+ Example:
21
+
22
+ ```python
23
+ >>> # At construction time:
24
+ >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
25
+ >>> # During data generation:
26
+ >>> yield {
27
+ ... 'en': 'the cat',
28
+ ... 'fr': 'le chat',
29
+ ... 'de': 'die katze'
30
+ ... }
31
+ ```
32
+ """
33
+
34
+ languages: List[str]
35
+ id: Optional[str] = None
36
+ # Automatically constructed
37
+ dtype: ClassVar[str] = "dict"
38
+ pa_type: ClassVar[Any] = None
39
+ _type: str = field(default="Translation", init=False, repr=False)
40
+
41
+ def __call__(self):
42
+ return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
43
+
44
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
45
+ """Flatten the Translation feature into a dictionary."""
46
+ from .features import Value
47
+
48
+ return {k: Value("string") for k in sorted(self.languages)}
49
+
50
+
51
+ @dataclass
52
+ class TranslationVariableLanguages:
53
+ """`FeatureConnector` for translations with variable languages per example.
54
+ Here for compatibility with tfds.
55
+
56
+ Args:
57
+ languages (`List[str]`, *optional*):
+ The set of allowed language codes. Each example is a dictionary mapping language codes to one or more
+ string translations, and the languages present may vary from example to example.
60
+
61
+ Returns:
62
+ - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
63
+ Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
64
+
65
+ Example:
66
+
67
+ ```python
68
+ >>> # At construction time:
69
+ >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
70
+ >>> # During data generation:
71
+ >>> yield {
72
+ ... 'en': 'the cat',
73
+ ... 'fr': ['le chat', 'la chatte'],
74
+ ... 'de': 'die katze'
75
+ ... }
76
+ >>> # Tensor returned :
77
+ >>> {
78
+ ... 'language': ['en', 'de', 'fr', 'fr'],
79
+ ... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
80
+ ... }
81
+ ```
82
+ """
83
+
84
+ languages: Optional[List] = None
85
+ num_languages: Optional[int] = None
86
+ id: Optional[str] = None
87
+ # Automatically constructed
88
+ dtype: ClassVar[str] = "dict"
89
+ pa_type: ClassVar[Any] = None
90
+ _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
91
+
92
+ def __post_init__(self):
93
+ self.languages = sorted(set(self.languages)) if self.languages else None
94
+ self.num_languages = len(self.languages) if self.languages else None
95
+
96
+ def __call__(self):
97
+ return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
98
+
99
+ def encode_example(self, translation_dict):
100
+ lang_set = set(self.languages)
101
+ if set(translation_dict) == {"language", "translation"}:
102
+ return translation_dict
103
+ elif self.languages and set(translation_dict) - lang_set:
104
+ raise ValueError(
105
+ f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
106
+ )
107
+
108
+ # Convert dictionary into tuples, splitting out cases where there are
109
+ # multiple translations for a single language.
110
+ translation_tuples = []
111
+ for lang, text in translation_dict.items():
112
+ if isinstance(text, str):
113
+ translation_tuples.append((lang, text))
114
+ else:
115
+ translation_tuples.extend([(lang, el) for el in text])
116
+
117
+ # Ensure translations are in ascending order by language code.
118
+ languages, translations = zip(*sorted(translation_tuples))
119
+
120
+ return {"language": languages, "translation": translations}
121
+
122
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
123
+ """Flatten the TranslationVariableLanguages feature into a dictionary."""
124
+ from .features import Sequence, Value
125
+
126
+ return {
127
+ "language": Sequence(Value("string")),
128
+ "translation": Sequence(Value("string")),
129
+ }
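A minimal usage sketch (not part of this commit) of the two translation features above; note how `TranslationVariableLanguages.encode_example` flattens and sorts a per-example dict into aligned `language`/`translation` lists:

```python
from datasets.features import Translation, TranslationVariableLanguages

# Fixed languages: one string field per language code, sorted alphabetically
fixed = Translation(languages=["en", "fr", "de"])
print(fixed())  # struct<de: string, en: string, fr: string>

# Variable languages: multiple translations per language are flattened and sorted
variable = TranslationVariableLanguages(languages=["en", "fr", "de"])
encoded = variable.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
print(encoded["language"])     # ('en', 'fr', 'fr')
print(encoded["translation"])  # ('the cat', 'la chatte', 'le chat')
```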
venv/lib/python3.10/site-packages/datasets/filesystems/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ import importlib
2
+ import shutil
3
+ import warnings
4
+ from typing import List
5
+
6
+ import fsspec
7
+ import fsspec.asyn
8
+ from fsspec.implementations.local import LocalFileSystem
9
+
10
+ from ..utils.deprecation_utils import deprecated
11
+ from . import compression
12
+
13
+
14
+ _has_s3fs = importlib.util.find_spec("s3fs") is not None
15
+
16
+ if _has_s3fs:
17
+ from .s3filesystem import S3FileSystem # noqa: F401
18
+
19
+ COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
20
+ compression.Bz2FileSystem,
21
+ compression.GzipFileSystem,
22
+ compression.Lz4FileSystem,
23
+ compression.XzFileSystem,
24
+ compression.ZstdFileSystem,
25
+ ]
26
+
27
+ # Register custom filesystems
28
+ for fs_class in COMPRESSION_FILESYSTEMS:
29
+ if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
30
+ warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
31
+ fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
32
+
33
+
34
+ @deprecated(
35
+ "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
36
+ )
37
+ def extract_path_from_uri(dataset_path: str) -> str:
38
+ """
39
+ Preprocesses `dataset_path` and removes the remote filesystem prefix (e.g. `s3://`).
40
+
41
+ Args:
42
+ dataset_path (`str`):
43
+ Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
44
+ """
45
+ if "://" in dataset_path:
46
+ dataset_path = dataset_path.split("://")[1]
47
+ return dataset_path
48
+
49
+
50
+ def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
51
+ """
52
+ Checks if `fs` is a remote filesystem.
53
+
54
+ Args:
55
+ fs (`fsspec.spec.AbstractFileSystem`):
56
+ An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
57
+ """
58
+ return not isinstance(fs, LocalFileSystem)
59
+
60
+
61
+ def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
62
+ """
63
+ Renames the file `src` in `fs` to `dst`.
64
+ """
65
+ if not is_remote_filesystem(fs):
66
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
67
+ shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
68
+ else:
69
+ fs.mv(src, dst, recursive=True)
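A minimal sketch (not part of this commit) of the helpers above: `rename` takes the `shutil.move` branch for local filesystems and falls back to `fs.mv` for remote ones:

```python
import os
import tempfile

import fsspec

from datasets.filesystems import is_remote_filesystem, rename

fs = fsspec.filesystem("file")
print(is_remote_filesystem(fs))  # False -> rename() uses shutil.move

# move a local file through the helper
tmp_dir = tempfile.mkdtemp()
src = os.path.join(tmp_dir, "a.txt")
with open(src, "w") as f:
    f.write("hello")
rename(fs, src, os.path.join(tmp_dir, "b.txt"))
print(os.listdir(tmp_dir))  # ['b.txt']
```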
venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.34 kB).
 
venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc ADDED
Binary file (4.23 kB).
 
venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc ADDED
Binary file (6.06 kB).
 
venv/lib/python3.10/site-packages/datasets/filesystems/compression.py ADDED
@@ -0,0 +1,123 @@
1
+ import os
2
+ from typing import Optional
3
+
4
+ import fsspec
5
+ from fsspec.archive import AbstractArchiveFileSystem
6
+
7
+
8
+ class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
9
+ """Read contents of compressed file as a filesystem with one file inside."""
10
+
11
+ root_marker = ""
12
+ protocol: str = (
13
+ None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
14
+ )
15
+ compression: str = None # compression type in fsspec. ex: "gzip"
16
+ extension: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
17
+
18
+ def __init__(
19
+ self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
20
+ ):
21
+ """
22
+ The compressed file system can be instantiated from any compressed file.
23
+ It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
24
+
25
+ The single file inside the filesystem is named after the compresssed file,
26
+ without the compression extension at the end of the filename.
27
+
28
+ Args:
29
+ fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
30
+ mode (:obj:``str``): Currently, only 'rb' accepted
31
+ target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
32
+ target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
33
+ """
34
+ super().__init__(self, **kwargs)
35
+ # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
36
+ self.file = fsspec.open(
37
+ fo,
38
+ mode="rb",
39
+ protocol=target_protocol,
40
+ compression=self.compression,
41
+ client_kwargs={
42
+ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
43
+ "trust_env": True, # Enable reading proxy env variables.
44
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
45
+ },
46
+ **(target_options or {}),
47
+ )
48
+ self.compressed_name = os.path.basename(self.file.path.split("::")[0])
49
+ self.uncompressed_name = (
50
+ self.compressed_name[: self.compressed_name.rindex(".")]
51
+ if "." in self.compressed_name
52
+ else self.compressed_name
53
+ )
54
+ self.dir_cache = None
55
+
56
+ @classmethod
57
+ def _strip_protocol(cls, path):
58
+ # compressed file paths are always relative to the archive root
59
+ return super()._strip_protocol(path).lstrip("/")
60
+
61
+ def _get_dirs(self):
62
+ if self.dir_cache is None:
63
+ f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
64
+ self.dir_cache = {f["name"]: f}
65
+
66
+ def cat(self, path: str):
67
+ return self.file.open().read()
68
+
69
+ def _open(
70
+ self,
71
+ path: str,
72
+ mode: str = "rb",
73
+ block_size=None,
74
+ autocommit=True,
75
+ cache_options=None,
76
+ **kwargs,
77
+ ):
78
+ path = self._strip_protocol(path)
79
+ if mode != "rb":
80
+ raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
81
+ return self.file.open()
82
+
83
+
84
+ class Bz2FileSystem(BaseCompressedFileFileSystem):
85
+ """Read contents of BZ2 file as a filesystem with one file inside."""
86
+
87
+ protocol = "bz2"
88
+ compression = "bz2"
89
+ extension = ".bz2"
90
+
91
+
92
+ class GzipFileSystem(BaseCompressedFileFileSystem):
93
+ """Read contents of GZIP file as a filesystem with one file inside."""
94
+
95
+ protocol = "gzip"
96
+ compression = "gzip"
97
+ extension = ".gz"
98
+
99
+
100
+ class Lz4FileSystem(BaseCompressedFileFileSystem):
101
+ """Read contents of LZ4 file as a filesystem with one file inside."""
102
+
103
+ protocol = "lz4"
104
+ compression = "lz4"
105
+ extension = ".lz4"
106
+
107
+
108
+ class XzFileSystem(BaseCompressedFileFileSystem):
109
+ """Read contents of .xz (LZMA) file as a filesystem with one file inside."""
110
+
111
+ protocol = "xz"
112
+ compression = "xz"
113
+ extension = ".xz"
114
+
115
+
116
+ class ZstdFileSystem(BaseCompressedFileFileSystem):
117
+ """
118
+ Read contents of .zstd file as a filesystem with one file inside.
119
+ """
120
+
121
+ protocol = "zstd"
122
+ compression = "zstd"
123
+ extension = ".zst"
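A minimal sketch (not part of this commit) of the registered compression protocols combined with fsspec URL chaining; the archive's single member is the file name without the compression extension:

```python
import gzip
import os
import tempfile

import fsspec

import datasets.filesystems  # noqa: F401  # importing registers gzip://, bz2://, xz://, lz4://, zstd://

path = os.path.join(tempfile.mkdtemp(), "file.txt.gz")
with gzip.open(path, "wt") as f:
    f.write("hello")

# "gzip://file.txt::<path>" exposes the decompressed content as a one-file filesystem
with fsspec.open(f"gzip://file.txt::{path}", "rt") as f:
    print(f.read())  # hello
```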
venv/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py ADDED
@@ -0,0 +1,116 @@
1
+ import s3fs
2
+
3
+ from ..utils.deprecation_utils import deprecated
4
+
5
+
6
+ @deprecated("Use s3fs.S3FileSystem instead.")
7
+ class S3FileSystem(s3fs.S3FileSystem):
8
+ """
9
+ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
10
+
11
+ Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
12
+
13
+ Args:
14
+ anon (`bool`, default to `False`):
15
+ Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
16
+ or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
17
+ key (`str`):
18
+ If not anonymous, use this access key ID, if specified.
19
+ secret (`str`):
20
+ If not anonymous, use this secret access key, if specified.
21
+ token (`str`):
22
+ If not anonymous, use this security token, if specified.
23
+ use_ssl (`bool`, defaults to `True`):
24
+ Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
25
+ also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
26
+ s3_additional_kwargs (`dict`):
27
+ Parameters that are used when calling S3 API methods. Typically used for things
28
+ like ServerSideEncryption.
29
+ client_kwargs (`dict`):
30
+ Parameters for the botocore client.
31
+ requester_pays (`bool`, defaults to `False`):
32
+ Whether `RequesterPays` buckets are supported.
33
+ default_block_size (`int`):
34
+ If given, the default block size value used for `open()`, if no specific value is given at call time.
35
+ The built-in default is 5MB.
36
+ default_fill_cache (`bool`, defaults to `True`):
37
+ Whether to use cache filling with open by default. Refer to `S3File.open`.
38
+ default_cache_type (`str`, defaults to `bytes`):
39
+ If given, the default `cache_type` value used for `open()`. Set to `none` if no
40
+ caching is desired. See fsspec's documentation for other available `cache_type` values.
41
+ version_aware (`bool`, defaults to `False`):
42
+ Whether to support bucket versioning. If enabled, this will require the user to have
43
+ the necessary IAM permissions for dealing with versioned objects.
44
+ cache_regions (`bool`, defaults to `False`):
45
+ Whether to cache bucket regions. Whenever a new bucket is used, it will
46
+ first find out which region it belongs to and then use the client for that region.
47
+ asynchronous (`bool`, defaults to `False`):
48
+ Whether this instance is to be used from inside coroutines.
49
+ config_kwargs (`dict`):
50
+ Parameters passed to `botocore.client.Config`.
51
+ **kwargs:
52
+ Other parameters for core session.
53
+ session (`aiobotocore.session.AioSession`):
54
+ Session to be used for all connections. This session will be used inplace of creating
55
+ a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
56
+ skip_instance_cache (`bool`):
57
+ Control reuse of instances. Passed on to `fsspec`.
58
+ use_listings_cache (`bool`):
59
+ Control reuse of directory listings. Passed on to `fsspec`.
60
+ listings_expiry_time (`int` or `float`):
61
+ Control reuse of directory listings. Passed on to `fsspec`.
62
+ max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
63
+
64
+ Examples:
65
+
66
+ Listing files from public S3 bucket.
67
+
68
+ ```py
69
+ >>> import datasets
70
+ >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
71
+ >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
72
+ ['dataset_info.json.json','dataset.arrow','state.json']
73
+ ```
74
+
75
+ Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
76
+
77
+ ```py
78
+ >>> import datasets
79
+ >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
80
+ >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
81
+ ['dataset_info.json.json','dataset.arrow','state.json']
82
+ ```
83
+
84
+ Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
85
+
86
+ ```py
87
+ >>> import botocore
88
+ >>> from datasets.filesystems import S3FileSystem
89
+
90
+ >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
91
+ >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
92
+ ```
93
+
94
+ Loading a dataset from S3 using `S3FileSystem` and [`load_from_disk`].
95
+
96
+ ```py
97
+ >>> from datasets import load_from_disk
98
+ >>> from datasets.filesystems import S3FileSystem
99
+
100
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
101
+ >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
102
+ >>> print(len(dataset))
103
+ 25000
104
+ ```
105
+
106
+ Saving a dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
107
+
108
+ ```py
109
+ >>> from datasets import load_dataset
110
+ >>> from datasets.filesystems import S3FileSystem
111
+
112
+ >>> dataset = load_dataset("imdb")
113
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
114
+ >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
115
+ ```
116
+ """
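Since the class above is deprecated, here is a minimal sketch (not part of this commit) of the suggested replacement: instantiate `s3fs.S3FileSystem` directly and forward its `storage_options` to the loaders. The bucket path is hypothetical, and this assumes a `datasets` version whose `load_from_disk` accepts `storage_options`, as in the docstring above:

```python
import s3fs

from datasets import load_from_disk

s3 = s3fs.S3FileSystem(anon=True)  # or key=..., secret=... for private buckets

# hypothetical bucket/prefix; storage_options forwards the credentials to fsspec
ds = load_from_disk("s3://my-bucket/my-dataset/train", storage_options=s3.storage_options)
print(len(ds))
```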
venv/lib/python3.10/site-packages/datasets/fingerprint.py ADDED
@@ -0,0 +1,494 @@
1
+ import inspect
2
+ import os
3
+ import random
4
+ import shutil
5
+ import tempfile
6
+ import weakref
7
+ from functools import wraps
8
+ from pathlib import Path
9
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ import xxhash
13
+
14
+ from . import config
15
+ from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
16
+ from .utils._dill import dumps
17
+ from .utils.deprecation_utils import deprecated
18
+ from .utils.logging import get_logger
19
+
20
+
21
+ if TYPE_CHECKING:
22
+ from .arrow_dataset import Dataset
23
+
24
+
25
+ logger = get_logger(__name__)
26
+
27
+
28
+ # Fingerprinting allows to have one deterministic fingerprint per dataset state.
29
+ # A dataset fingerprint is updated after each transform.
30
+ # Re-running the same transforms on a dataset in a different session results in the same fingerprint.
31
+ # This is possible thanks to a custom hashing function that works with most python objects.
32
+
33
+ # Fingerprinting is the main mechanism that enables caching.
34
+ # The caching mechanism allows to reload an existing cache file if it's already been computed.
35
+
36
+
37
+ #################
38
+ # Caching
39
+ #################
40
+
41
+ _CACHING_ENABLED = True
42
+ _TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None
43
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
44
+
45
+
46
+ class _TempCacheDir:
47
+ """
48
+ A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
49
+ before deleting the directory itself to avoid permission errors on Windows.
50
+ """
51
+
52
+ def __init__(self):
53
+ self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
54
+ self._finalizer = weakref.finalize(self, self._cleanup)
55
+
56
+ def _cleanup(self):
57
+ for dset in get_datasets_with_cache_file_in_temp_dir():
58
+ dset.__del__()
59
+ if os.path.exists(self.name):
60
+ try:
61
+ shutil.rmtree(self.name)
62
+ except Exception as e:
63
+ raise OSError(
64
+ f"An error occurred while trying to delete temporary cache directory {self.name}. Please delete it manually."
65
+ ) from e
66
+
67
+ def cleanup(self):
68
+ if self._finalizer.detach():
69
+ self._cleanup()
70
+
71
+
72
+ def maybe_register_dataset_for_temp_dir_deletion(dataset):
73
+ """
74
+ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
75
+ to properly delete them before deleting the temporary directory.
76
+ The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
77
+ """
78
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
79
+ return
80
+
81
+ global _DATASETS_WITH_TABLE_IN_TEMP_DIR
82
+ if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
83
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
84
+ if any(
85
+ Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
86
+ for cache_file in dataset.cache_files
87
+ ):
88
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
89
+
90
+
91
+ def get_datasets_with_cache_file_in_temp_dir():
92
+ return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
93
+
94
+
95
+ def enable_caching():
96
+ """
97
+ When applying transforms on a dataset, the data are stored in cache files.
98
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
99
+
100
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
101
+ after each transform.
102
+
103
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
104
+ More precisely, if the caching is disabled:
105
+ - cache files are always recreated
106
+ - cache files are written to a temporary directory that is deleted when session closes
107
+ - cache files are named using a random hash instead of the dataset fingerprint
108
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
109
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
110
+ the `download_mode` parameter in [`~datasets.load_dataset`].
111
+ """
112
+ global _CACHING_ENABLED
113
+ _CACHING_ENABLED = True
114
+
115
+
116
+ def disable_caching():
117
+ """
118
+ When applying transforms on a dataset, the data are stored in cache files.
119
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
120
+
121
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
122
+ after each transform.
123
+
124
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
125
+ More precisely, if the caching is disabled:
126
+ - cache files are always recreated
127
+ - cache files are written to a temporary directory that is deleted when session closes
128
+ - cache files are named using a random hash instead of the dataset fingerprint
129
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
130
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
131
+ the `download_mode` parameter in [`~datasets.load_dataset`].
132
+ """
133
+ global _CACHING_ENABLED
134
+ _CACHING_ENABLED = False
135
+
136
+
137
+ @deprecated(
138
+ "Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets."
139
+ )
140
+ def set_caching_enabled(boolean: bool):
141
+ """
142
+ When applying transforms on a dataset, the data are stored in cache files.
143
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
144
+
145
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
146
+ after each transform.
147
+
148
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
149
+ More precisely, if the caching is disabled:
150
+ - cache files are always recreated
151
+ - cache files are written to a temporary directory that is deleted when session closes
152
+ - cache files are named using a random hash instead of the dataset fingerprint
153
+ - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
154
+ - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
155
+ the ``download_mode`` parameter in :func:`datasets.load_dataset`.
156
+ """
157
+ global _CACHING_ENABLED
158
+ _CACHING_ENABLED = bool(boolean)
159
+
160
+
161
+ def is_caching_enabled() -> bool:
162
+ """
163
+ When applying transforms on a dataset, the data are stored in cache files.
164
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
165
+
166
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
167
+ after each transform.
168
+
169
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
170
+ More precisely, if the caching is disabled:
171
+ - cache files are always recreated
172
+ - cache files are written to a temporary directory that is deleted when session closes
173
+ - cache files are named using a random hash instead of the dataset fingerprint
174
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
175
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
176
+ the `download_mode` parameter in [`~datasets.load_dataset`].
177
+ """
178
+ global _CACHING_ENABLED
179
+ return bool(_CACHING_ENABLED)
180
+
181
+
182
+ def get_temporary_cache_files_directory() -> str:
183
+ """Return a directory that is deleted when session closes."""
184
+ global _TEMP_DIR_FOR_TEMP_CACHE_FILES
185
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
186
+ _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir()
187
+ return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
188
+
189
+
190
+ #################
191
+ # Hashing
192
+ #################
193
+
194
+
195
+ @deprecated("Use `copyreg.pickle` to register a custom reducer.")
196
+ def hashregister(*types):
197
+ def proxy(func):
198
+ for t in types:
199
+ Hasher.dispatch[t] = func
200
+ return func
201
+
202
+ return proxy
203
+
204
+
205
+ class Hasher:
206
+ """Hasher that accepts python objects as inputs."""
207
+
208
+ dispatch: Dict = {}
209
+
210
+ def __init__(self):
211
+ self.m = xxhash.xxh64()
212
+
213
+ @classmethod
214
+ def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
215
+ value = [value] if isinstance(value, bytes) else value
216
+ m = xxhash.xxh64()
217
+ for x in value:
218
+ m.update(x)
219
+ return m.hexdigest()
220
+
221
+ @classmethod
222
+ @deprecated("Use `Hasher.hash` instead.")
223
+ def hash_default(cls, value: Any) -> str:
224
+ return cls.hash(value)
225
+
226
+ @classmethod
227
+ def hash(cls, value: Any) -> str:
228
+ return cls.hash_bytes(dumps(value))
229
+
230
+ def update(self, value: Any) -> None:
231
+ header_for_update = f"=={type(value)}=="
232
+ value_for_update = self.hash(value)
233
+ self.m.update(header_for_update.encode("utf8"))
234
+ self.m.update(value_for_update.encode("utf-8"))
235
+
236
+ def hexdigest(self) -> str:
237
+ return self.m.hexdigest()
238
+
239
+
240
+ #################
241
+ # Fingerprinting
242
+ #################
243
+
244
+ fingerprint_rng = random.Random()
245
+ # we show a warning only once when fingerprinting fails to avoid spam
246
+ fingerprint_warnings: Dict[str, bool] = {}
247
+
248
+
249
+ def generate_fingerprint(dataset: "Dataset") -> str:
250
+ state = dataset.__dict__
251
+ hasher = Hasher()
252
+ for key in sorted(state):
253
+ if key == "_fingerprint":
254
+ continue
255
+ hasher.update(key)
256
+ hasher.update(state[key])
257
+ # hash data files last modification timestamps as well
258
+ for cache_file in dataset.cache_files:
259
+ hasher.update(os.path.getmtime(cache_file["filename"]))
260
+ return hasher.hexdigest()
261
+
262
+
263
+ def generate_random_fingerprint(nbits: int = 64) -> str:
264
+ return f"{fingerprint_rng.getrandbits(nbits):0{nbits//4}x}"
265
+
266
+
267
+ def update_fingerprint(fingerprint, transform, transform_args):
268
+ global fingerprint_warnings
269
+ hasher = Hasher()
270
+ hasher.update(fingerprint)
271
+ try:
272
+ hasher.update(transform)
273
+ except: # noqa various errors might raise here from pickle or dill
274
+ if _CACHING_ENABLED:
275
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
276
+ logger.warning(
277
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
278
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
279
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
280
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
281
+ )
282
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
283
+ else:
284
+ logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
285
+ else:
286
+ logger.info(
287
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
288
+ )
289
+
290
+ return generate_random_fingerprint()
291
+ for key in sorted(transform_args):
292
+ hasher.update(key)
293
+ try:
294
+ hasher.update(transform_args[key])
295
+ except: # noqa various errors might raise here from pickle or dill
296
+ if _CACHING_ENABLED:
297
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
298
+ logger.warning(
299
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
300
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
301
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
302
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
303
+ )
304
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
305
+ else:
306
+ logger.info(
307
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
308
+ )
309
+ else:
310
+ logger.info(
311
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
312
+ )
313
+ return generate_random_fingerprint()
314
+ return hasher.hexdigest()
315
+
316
+
317
+ def validate_fingerprint(fingerprint: str, max_length=64):
318
+ """
319
+ Make sure the fingerprint is a non-empty string that is not longer that max_length=64 by default,
320
+ so that the fingerprint can be used to name cache files without issues.
321
+ """
322
+ if not isinstance(fingerprint, str) or not fingerprint:
323
+ raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
324
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
325
+ if invalid_char in fingerprint:
326
+ raise ValueError(
327
+ f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
328
+ f"They could create issues when creating cache files."
329
+ )
330
+ if len(fingerprint) > max_length:
331
+ raise ValueError(
332
+ f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}."
333
+ "It could create issues when creating cache files."
334
+ )
335
+
336
+
337
+ def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str:
338
+ """
339
+ Format a transform to the format that will be used to update the fingerprint.
340
+ """
341
+ transform = f"{func.__module__}.{func.__qualname__}"
342
+ if version is not None:
343
+ transform += f"@{version}"
344
+ return transform
345
+
346
+
347
+ def format_kwargs_for_fingerprint(
348
+ func: Callable,
349
+ args: Tuple,
350
+ kwargs: Dict[str, Any],
351
+ use_kwargs: Optional[List[str]] = None,
352
+ ignore_kwargs: Optional[List[str]] = None,
353
+ randomized_function: bool = False,
354
+ ) -> Dict[str, Any]:
355
+ """
356
+ Format the kwargs of a transform to the format that will be used to update the fingerprint.
357
+ """
358
+ kwargs_for_fingerprint = kwargs.copy()
359
+ if args:
360
+ params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD]
361
+ args = args[1:] # assume the first argument is the dataset
362
+ params = params[1:]
363
+ kwargs_for_fingerprint.update(zip(params, args))
364
+ else:
365
+ del kwargs_for_fingerprint[
366
+ next(iter(inspect.signature(func).parameters))
367
+ ] # assume the first key is the dataset
368
+
369
+ # keep the right kwargs to be hashed to generate the fingerprint
370
+
371
+ if use_kwargs:
372
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
373
+ if ignore_kwargs:
374
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
375
+ if randomized_function: # randomized functions have `seed` and `generator` parameters
376
+ if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
377
+ _, seed, pos, *_ = np.random.get_state()
378
+ seed = seed[pos] if pos < 624 else seed[0]
379
+ kwargs_for_fingerprint["generator"] = np.random.default_rng(seed)
380
+
381
+ # remove kwargs that are the default values
382
+
383
+ default_values = {
384
+ p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
385
+ }
386
+ for default_varname, default_value in default_values.items():
387
+ if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value:
388
+ kwargs_for_fingerprint.pop(default_varname)
389
+ return kwargs_for_fingerprint
390
+
391
+
392
+ def fingerprint_transform(
393
+ inplace: bool,
394
+ use_kwargs: Optional[List[str]] = None,
395
+ ignore_kwargs: Optional[List[str]] = None,
396
+ fingerprint_names: Optional[List[str]] = None,
397
+ randomized_function: bool = False,
398
+ version: Optional[str] = None,
399
+ ):
400
+ """
401
+ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
402
+ Args:
403
+ inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace.
404
+ Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
405
+ setting the fingerprint of the returned Dataset.
406
+ use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account
407
+ to update the fingerprint to the wrapped method that should take care of
408
+ setting the fingerprint of the returned Dataset. By default all the arguments are used.
409
+ ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account
410
+ to update the fingerprint. Note that ignore_kwargs prevails on use_kwargs.
411
+ fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]):
412
+ If the dataset transforms is not inplace and returns a DatasetDict, then it can require
413
+ several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
414
+ one fingerprint named after each element of fingerprint_names is going to be passed.
415
+ randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has
416
+ optional parameters "seed" and "generator", then you can set randomized_function to True.
417
+ This way, even if users set "seed" and "generator" to None, then the fingerprint is
418
+ going to be randomly generated depending on numpy's current state. In this case, the
419
+ generator is set to np.random.default_rng(np.random.get_state()[1][0]).
420
+ version (:obj:`str`, optional): version of the transform. The version is taken into account when
421
+ computing the fingerprint. If a datase transform changes (or at least if the output data
422
+ that are cached changes), then one should increase the version. If the version stays the
423
+ same, then old cached data could be reused that are not compatible with the new transform.
424
+ It should be in the format "MAJOR.MINOR.PATCH".
425
+ """
426
+
427
+ if use_kwargs is not None and not isinstance(use_kwargs, list):
428
+ raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
429
+
430
+ if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
431
+ raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}")
432
+
433
+ if inplace and fingerprint_names:
434
+ raise ValueError("fingerprint_names are only used when inplace is False")
435
+
436
+ fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
437
+
438
+ def _fingerprint(func):
439
+ if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
440
+ raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
441
+
442
+ if randomized_function: # randomized function have seed and generator parameters
443
+ if "seed" not in func.__code__.co_varnames:
444
+ raise ValueError(f"'seed' must be in {func}'s signature")
445
+ if "generator" not in func.__code__.co_varnames:
446
+ raise ValueError(f"'generator' must be in {func}'s signature")
447
+ # this call has to be outside the wrapper or since __qualname__ changes in multiprocessing
448
+ transform = format_transform_for_fingerprint(func, version=version)
449
+
450
+ @wraps(func)
451
+ def wrapper(*args, **kwargs):
452
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(
453
+ func,
454
+ args,
455
+ kwargs,
456
+ use_kwargs=use_kwargs,
457
+ ignore_kwargs=ignore_kwargs,
458
+ randomized_function=randomized_function,
459
+ )
460
+
461
+ if args:
462
+ dataset: Dataset = args[0]
463
+ args = args[1:]
464
+ else:
465
+ dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
466
+
467
+ # compute new_fingerprint and add it to the args of not in-place transforms
468
+ if inplace:
469
+ new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
470
+ else:
471
+ for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
472
+ if kwargs.get(fingerprint_name) is None:
473
+ kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
474
+ kwargs[fingerprint_name] = update_fingerprint(
475
+ dataset._fingerprint, transform, kwargs_for_fingerprint
476
+ )
477
+ else:
478
+ validate_fingerprint(kwargs[fingerprint_name])
479
+
480
+ # Call actual function
481
+
482
+ out = func(dataset, *args, **kwargs)
483
+
484
+ # Update fingerprint of in-place transforms + update in-place history of transforms
485
+
486
+ if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
487
+ dataset._fingerprint = new_fingerprint
488
+
489
+ return out
490
+
491
+ wrapper._decorator_name_ = "fingerprint"
492
+ return wrapper
493
+
494
+ return _fingerprint
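A minimal sketch (not part of this commit) of the hashing primitives above; the previous fingerprint and transform name are illustrative values, and determinism across sessions is what makes cache files reusable:

```python
from datasets.fingerprint import Hasher, update_fingerprint

# Hasher.hash works on (almost) any picklable Python object and is stable across sessions
print(Hasher.hash({"batched": True, "remove_columns": ["idx"]}))

# update_fingerprint chains the previous fingerprint with the transform name and its kwargs
new_fingerprint = update_fingerprint(
    "a1b2c3d4e5f60718",  # previous dataset fingerprint (illustrative)
    "Dataset.map",       # illustrative transform identifier
    {"function": len, "batched": True},
)
print(new_fingerprint)  # 16-character xxh64 hex digest; same inputs -> same output
```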
venv/lib/python3.10/site-packages/datasets/info.py ADDED
@@ -0,0 +1,593 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """DatasetInfo and MetricInfo record information we know about a dataset and a metric.
17
+
18
+ This includes things that we know about the dataset statically, i.e.:
19
+ - description
20
+ - canonical location
21
+ - does it have validation and tests splits
22
+ - size
23
+ - etc.
24
+
25
+ This also includes the things that can and should be computed once we've
26
+ processed the dataset as well:
27
+ - number of examples (in each split)
28
+ - etc.
29
+ """
30
+
31
+ import copy
32
+ import dataclasses
33
+ import json
34
+ import os
35
+ import posixpath
36
+ import warnings
37
+ from dataclasses import dataclass
38
+ from pathlib import Path
39
+ from typing import ClassVar, Dict, List, Optional, Union
40
+
41
+ import fsspec
42
+ from fsspec.core import url_to_fs
43
+ from huggingface_hub import DatasetCard, DatasetCardData
44
+
45
+ from . import config
46
+ from .features import Features, Value
47
+ from .splits import SplitDict
48
+ from .tasks import TaskTemplate, task_template_from_dict
49
+ from .utils import Version
50
+ from .utils.logging import get_logger
51
+ from .utils.py_utils import asdict, unique_values
52
+
53
+
54
+ logger = get_logger(__name__)
55
+
56
+
57
+ @dataclass
58
+ class SupervisedKeysData:
59
+ input: str = ""
60
+ output: str = ""
61
+
62
+
63
+ @dataclass
64
+ class DownloadChecksumsEntryData:
65
+ key: str = ""
66
+ value: str = ""
67
+
68
+
69
+ class MissingCachedSizesConfigError(Exception):
70
+ """The expected cached sizes of the download file are missing."""
71
+
72
+
73
+ class NonMatchingCachedSizesError(Exception):
74
+ """The prepared split doesn't have expected sizes."""
75
+
76
+
77
+ @dataclass
78
+ class PostProcessedInfo:
79
+ features: Optional[Features] = None
80
+ resources_checksums: Optional[dict] = None
81
+
82
+ def __post_init__(self):
83
+ # Convert back to the correct classes when we reload from dict
84
+ if self.features is not None and not isinstance(self.features, Features):
85
+ self.features = Features.from_dict(self.features)
86
+
87
+ @classmethod
88
+ def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
89
+ field_names = {f.name for f in dataclasses.fields(cls)}
90
+ return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
91
+
92
+
93
+ @dataclass
94
+ class DatasetInfo:
95
+ """Information about a dataset.
96
+
97
+ `DatasetInfo` documents datasets, including its name, version, and features.
98
+ See the constructor arguments and properties for a full list.
99
+
100
+ Not all fields are known on construction and may be updated later.
101
+
102
+ Attributes:
103
+ description (`str`):
104
+ A description of the dataset.
105
+ citation (`str`):
106
+ A BibTeX citation of the dataset.
107
+ homepage (`str`):
108
+ A URL to the official homepage for the dataset.
109
+ license (`str`):
110
+ The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
111
+ features ([`Features`], *optional*):
112
+ The features used to specify the dataset's column types.
113
+ post_processed (`PostProcessedInfo`, *optional*):
114
+ Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
115
+ supervised_keys (`SupervisedKeysData`, *optional*):
116
+ Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
117
+ builder_name (`str`, *optional*):
118
+ The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
119
+ config_name (`str`, *optional*):
120
+ The name of the configuration derived from [`BuilderConfig`].
121
+ version (`str` or [`Version`], *optional*):
122
+ The version of the dataset.
123
+ splits (`dict`, *optional*):
124
+ The mapping between split name and metadata.
125
+ download_checksums (`dict`, *optional*):
126
+ The mapping between the URL to download the dataset's checksums and corresponding metadata.
127
+ download_size (`int`, *optional*):
128
+ The size of the files to download to generate the dataset, in bytes.
129
+ post_processing_size (`int`, *optional*):
130
+ Size of the dataset in bytes after post-processing, if any.
131
+ dataset_size (`int`, *optional*):
132
+ The combined size in bytes of the Arrow tables for all splits.
133
+ size_in_bytes (`int`, *optional*):
134
+ The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
135
+ task_templates (`List[TaskTemplate]`, *optional*):
136
+ The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
137
+ **config_kwargs (additional keyword arguments):
138
+ Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
139
+ """
140
+
141
+ # Set in the dataset scripts
142
+ description: str = dataclasses.field(default_factory=str)
143
+ citation: str = dataclasses.field(default_factory=str)
144
+ homepage: str = dataclasses.field(default_factory=str)
145
+ license: str = dataclasses.field(default_factory=str)
146
+ features: Optional[Features] = None
147
+ post_processed: Optional[PostProcessedInfo] = None
148
+ supervised_keys: Optional[SupervisedKeysData] = None
149
+ task_templates: Optional[List[TaskTemplate]] = None
150
+
151
+ # Set later by the builder
152
+ builder_name: Optional[str] = None
153
+ dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
154
+ config_name: Optional[str] = None
155
+ version: Optional[Union[str, Version]] = None
156
+ # Set later by `download_and_prepare`
157
+ splits: Optional[dict] = None
158
+ download_checksums: Optional[dict] = None
159
+ download_size: Optional[int] = None
160
+ post_processing_size: Optional[int] = None
161
+ dataset_size: Optional[int] = None
162
+ size_in_bytes: Optional[int] = None
163
+
164
+ _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
165
+ "config_name",
166
+ "download_size",
167
+ "dataset_size",
168
+ "features",
169
+ "splits",
170
+ ]
171
+
172
+ def __post_init__(self):
173
+ # Convert back to the correct classes when we reload from dict
174
+ if self.features is not None and not isinstance(self.features, Features):
175
+ self.features = Features.from_dict(self.features)
176
+ if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
177
+ self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
178
+ if self.version is not None and not isinstance(self.version, Version):
179
+ if isinstance(self.version, str):
180
+ self.version = Version(self.version)
181
+ else:
182
+ self.version = Version.from_dict(self.version)
183
+ if self.splits is not None and not isinstance(self.splits, SplitDict):
184
+ self.splits = SplitDict.from_split_dict(self.splits)
185
+ if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
186
+ if isinstance(self.supervised_keys, (tuple, list)):
187
+ self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
188
+ else:
189
+ self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
190
+
191
+ # Parse and make a list of templates
192
+ if self.task_templates is not None:
193
+ if isinstance(self.task_templates, (list, tuple)):
194
+ templates = [
195
+ template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
196
+ for template in self.task_templates
197
+ ]
198
+ self.task_templates = [template for template in templates if template is not None]
199
+ elif isinstance(self.task_templates, TaskTemplate):
200
+ self.task_templates = [self.task_templates]
201
+ else:
202
+ template = task_template_from_dict(self.task_templates)
203
+ self.task_templates = [template] if template is not None else []
204
+
205
+ # Align task templates with features
206
+ if self.task_templates is not None:
207
+ self.task_templates = list(self.task_templates)
208
+ if self.features is not None:
209
+ self.task_templates = [
210
+ template.align_with_features(self.features) for template in (self.task_templates)
211
+ ]
212
+
213
+ def write_to_directory(
214
+ self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
215
+ ):
216
+ """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
217
+
218
+ Args:
219
+ dataset_info_dir (`str`):
220
+ Destination directory.
221
+ pretty_print (`bool`, defaults to `False`):
222
+ If `True`, the JSON will be pretty-printed with the indent level of 4.
223
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
224
+ Instance of the remote filesystem used to download the files from.
225
+
226
+ <Deprecated version="2.9.0">
227
+
228
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
229
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
230
+
231
+ </Deprecated>
232
+
233
+ storage_options (`dict`, *optional*):
234
+ Key/value pairs to be passed on to the file-system backend, if any.
235
+
236
+ <Added version="2.9.0"/>
237
+
238
+ Example:
239
+
240
+ ```py
241
+ >>> from datasets import load_dataset
242
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
243
+ >>> ds.info.write_to_directory("/path/to/directory/")
244
+ ```
245
+ """
246
+ if fs != "deprecated":
247
+ warnings.warn(
248
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
249
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
250
+ FutureWarning,
251
+ )
252
+ storage_options = fs.storage_options
253
+
254
+ fs: fsspec.AbstractFileSystem
255
+ fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
256
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
257
+ self._dump_info(f, pretty_print=pretty_print)
258
+ if self.license:
259
+ with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
260
+ self._dump_license(f)
261
+
262
+ def _dump_info(self, file, pretty_print=False):
263
+ """Dump info in `file` file-like object open in bytes mode (to support remote files)"""
264
+ file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
265
+
266
+ def _dump_license(self, file):
267
+ """Dump license in `file` file-like object open in bytes mode (to support remote files)"""
268
+ file.write(self.license.encode("utf-8"))
269
+
270
+ @classmethod
271
+ def from_merge(cls, dataset_infos: List["DatasetInfo"]):
272
+ dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
273
+
274
+ if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
275
+ # if all dataset_infos are equal we don't need to merge. Just return the first.
276
+ return dataset_infos[0]
277
+
278
+ description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
279
+ citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
280
+ homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
281
+ license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
282
+ features = None
283
+ supervised_keys = None
284
+ task_templates = None
285
+
286
+ # Find common task templates across all dataset infos
287
+ all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
288
+ if len(all_task_templates) > 1:
289
+ task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
290
+ elif len(all_task_templates):
291
+ task_templates = list(set(all_task_templates[0]))
292
+ # If no common task templates found, replace empty list with None
293
+ task_templates = task_templates if task_templates else None
294
+
295
+ return cls(
296
+ description=description,
297
+ citation=citation,
298
+ homepage=homepage,
299
+ license=license,
300
+ features=features,
301
+ supervised_keys=supervised_keys,
302
+ task_templates=task_templates,
303
+ )
304
+
305
+ @classmethod
306
+ def from_directory(
307
+ cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
308
+ ) -> "DatasetInfo":
309
+ """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
310
+
311
+ This function updates all the dynamically generated fields (num_examples,
312
+ hash, time of creation,...) of the [`DatasetInfo`].
313
+
314
+ This will overwrite all previous metadata.
315
+
316
+ Args:
317
+ dataset_info_dir (`str`):
318
+ The directory containing the metadata file. This
319
+ should be the root directory of a specific dataset version.
320
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
321
+ Instance of the remote filesystem used to download the files from.
322
+
323
+ <Deprecated version="2.9.0">
324
+
325
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
326
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
327
+
328
+ </Deprecated>
329
+
330
+ storage_options (`dict`, *optional*):
331
+ Key/value pairs to be passed on to the file-system backend, if any.
332
+
333
+ <Added version="2.9.0"/>
334
+
335
+ Example:
336
+
337
+ ```py
338
+ >>> from datasets import DatasetInfo
339
+ >>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
340
+ ```
341
+ """
342
+ if fs != "deprecated":
343
+ warnings.warn(
344
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
345
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
346
+ FutureWarning,
347
+ )
348
+ storage_options = fs.storage_options
349
+
350
+ fs: fsspec.AbstractFileSystem
351
+ fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
352
+ logger.info(f"Loading Dataset info from {dataset_info_dir}")
353
+ if not dataset_info_dir:
354
+ raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
355
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
356
+ dataset_info_dict = json.load(f)
357
+ return cls.from_dict(dataset_info_dict)
358
+
359
+ @classmethod
360
+ def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
361
+ field_names = {f.name for f in dataclasses.fields(cls)}
362
+ return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
363
+
364
+ def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
365
+ self_dict = self.__dict__
366
+ self_dict.update(
367
+ **{
368
+ k: copy.deepcopy(v)
369
+ for k, v in other_dataset_info.__dict__.items()
370
+ if (v is not None or not ignore_none)
371
+ }
372
+ )
373
+
374
+ def copy(self) -> "DatasetInfo":
375
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
376
+
377
+ def _to_yaml_dict(self) -> dict:
378
+ yaml_dict = {}
379
+ dataset_info_dict = asdict(self)
380
+ for key in dataset_info_dict:
381
+ if key in self._INCLUDED_INFO_IN_YAML:
382
+ value = getattr(self, key)
383
+ if hasattr(value, "_to_yaml_list"): # Features, SplitDict
384
+ yaml_dict[key] = value._to_yaml_list()
385
+ elif hasattr(value, "_to_yaml_string"): # Version
386
+ yaml_dict[key] = value._to_yaml_string()
387
+ else:
388
+ yaml_dict[key] = value
389
+ return yaml_dict
390
+
391
+ @classmethod
392
+ def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
393
+ yaml_data = copy.deepcopy(yaml_data)
394
+ if yaml_data.get("features") is not None:
395
+ yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
396
+ if yaml_data.get("splits") is not None:
397
+ yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
398
+ field_names = {f.name for f in dataclasses.fields(cls)}
399
+ return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
400
+
401
+
402
+ class DatasetInfosDict(Dict[str, DatasetInfo]):
403
+ def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
404
+ total_dataset_infos = {}
405
+ dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
406
+ dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
407
+ if not overwrite:
408
+ total_dataset_infos = self.from_directory(dataset_infos_dir)
409
+ total_dataset_infos.update(self)
410
+ if os.path.exists(dataset_infos_path):
411
+ # for backward compatibility, let's update the JSON file if it exists
412
+ with open(dataset_infos_path, "w", encoding="utf-8") as f:
413
+ dataset_infos_dict = {
414
+ config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
415
+ }
416
+ json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
417
+ # Dump the infos in the YAML part of the README.md file
418
+ if os.path.exists(dataset_readme_path):
419
+ dataset_card = DatasetCard.load(dataset_readme_path)
420
+ dataset_card_data = dataset_card.data
421
+ else:
422
+ dataset_card = None
423
+ dataset_card_data = DatasetCardData()
424
+ if total_dataset_infos:
425
+ total_dataset_infos.to_dataset_card_data(dataset_card_data)
426
+ dataset_card = (
427
+ DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
428
+ )
429
+ dataset_card.save(Path(dataset_readme_path))
430
+
431
+ @classmethod
432
+ def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
433
+ logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
434
+ # Load the info from the YAML part of README.md
435
+ if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
436
+ dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
437
+ if "dataset_info" in dataset_card_data:
438
+ return cls.from_dataset_card_data(dataset_card_data)
439
+ if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
440
+ # this is just to have backward compatibility with dataset_infos.json files
441
+ with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
442
+ return cls(
443
+ {
444
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
445
+ for config_name, dataset_info_dict in json.load(f).items()
446
+ }
447
+ )
448
+ else:
449
+ return cls()
450
+
451
+ @classmethod
452
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
453
+ if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
454
+ if isinstance(dataset_card_data["dataset_info"], list):
455
+ return cls(
456
+ {
457
+ dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
458
+ dataset_info_yaml_dict
459
+ )
460
+ for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
461
+ }
462
+ )
463
+ else:
464
+ dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
465
+ dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
466
+ return cls({dataset_info.config_name: dataset_info})
467
+ else:
468
+ return cls()
469
+
470
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
471
+ if self:
472
+ # first get existing metadata info
473
+ if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
474
+ dataset_metadata_infos = {
475
+ dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
476
+ }
477
+ elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
478
+ dataset_metadata_infos = {
479
+ config_metadata["config_name"]: config_metadata
480
+ for config_metadata in dataset_card_data["dataset_info"]
481
+ }
482
+ else:
483
+ dataset_metadata_infos = {}
484
+ # update/rewrite existing metadata info with the one to dump
485
+ total_dataset_infos = {
486
+ **dataset_metadata_infos,
487
+ **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
488
+ }
489
+ # the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
490
+ for config_name, dset_info_yaml_dict in total_dataset_infos.items():
491
+ dset_info_yaml_dict["config_name"] = config_name
492
+ if len(total_dataset_infos) == 1:
493
+ # use a struct instead of a list of configurations, since there's only one
494
+ dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
495
+ config_name = dataset_card_data["dataset_info"].pop("config_name", None)
496
+ if config_name != "default":
497
+ # if config_name is not "default" preserve it and put at the first position
498
+ dataset_card_data["dataset_info"] = {
499
+ "config_name": config_name,
500
+ **dataset_card_data["dataset_info"],
501
+ }
502
+ else:
503
+ dataset_card_data["dataset_info"] = []
504
+ for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
505
+ # add the config_name field in first position
506
+ dataset_info_yaml_dict.pop("config_name", None)
507
+ dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
508
+ dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
509
+
510
+
511
+ @dataclass
512
+ class MetricInfo:
513
+ """Information about a metric.
514
+
515
+ `MetricInfo` documents a metric, including its name, version, and features.
516
+ See the constructor arguments and properties for a full list.
517
+
518
+ Note: Not all fields are known on construction and may be updated later.
519
+ """
520
+
521
+ # Set in the dataset scripts
522
+ description: str
523
+ citation: str
524
+ features: Features
525
+ inputs_description: str = dataclasses.field(default_factory=str)
526
+ homepage: str = dataclasses.field(default_factory=str)
527
+ license: str = dataclasses.field(default_factory=str)
528
+ codebase_urls: List[str] = dataclasses.field(default_factory=list)
529
+ reference_urls: List[str] = dataclasses.field(default_factory=list)
530
+ streamable: bool = False
531
+ format: Optional[str] = None
532
+
533
+ # Set later by the builder
534
+ metric_name: Optional[str] = None
535
+ config_name: Optional[str] = None
536
+ experiment_id: Optional[str] = None
537
+
538
+ def __post_init__(self):
539
+ if self.format is not None:
540
+ for key, value in self.features.items():
541
+ if not isinstance(value, Value):
542
+ raise ValueError(
543
+ f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
544
+ f"Here {key} is an instance of {value.__class__.__name__}"
545
+ )
546
+
547
+ def write_to_directory(self, metric_info_dir, pretty_print=False):
548
+ """Write `MetricInfo` as JSON to `metric_info_dir`.
549
+ Also save the license separately in LICENSE.
550
+ If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
551
+
552
+ Example:
553
+
554
+ ```py
555
+ >>> from datasets import load_metric
556
+ >>> metric = load_metric("accuracy")
557
+ >>> metric.info.write_to_directory("/path/to/directory/")
558
+ ```
559
+ """
560
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
561
+ json.dump(asdict(self), f, indent=4 if pretty_print else None)
562
+
563
+ if self.license:
564
+ with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
565
+ f.write(self.license)
566
+
567
+ @classmethod
568
+ def from_directory(cls, metric_info_dir) -> "MetricInfo":
569
+ """Create MetricInfo from the JSON file in `metric_info_dir`.
570
+
571
+ Args:
572
+ metric_info_dir: `str` The directory containing the metadata file. This
573
+ should be the root directory of a specific dataset version.
574
+
575
+ Example:
576
+
577
+ ```py
578
+ >>> from datasets import MetricInfo
579
+ >>> metric_info = MetricInfo.from_directory("/path/to/directory/")
580
+ ```
581
+ """
582
+ logger.info(f"Loading Metric info from {metric_info_dir}")
583
+ if not metric_info_dir:
584
+ raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
585
+
586
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
587
+ metric_info_dict = json.load(f)
588
+ return cls.from_dict(metric_info_dict)
589
+
590
+ @classmethod
591
+ def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
592
+ field_names = {f.name for f in dataclasses.fields(cls)}
593
+ return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
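As a quick sanity check of the `DatasetInfo` serialization helpers added above, the following sketch round-trips an info object through a local directory. It is illustrative only: the field values and the temporary directory are assumptions, not part of this commit.

```py
import tempfile

from datasets import DatasetInfo, Features, Value

info = DatasetInfo(
    description="toy dataset",  # illustrative values, not taken from the diff
    license="mit",
    features=Features({"text": Value("string"), "label": Value("int64")}),
)

with tempfile.TemporaryDirectory() as tmp_dir:
    # write_to_directory dumps dataset_info.json (and a LICENSE file, since a license is set)
    info.write_to_directory(tmp_dir, pretty_print=True)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert reloaded.features == info.features
```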
venv/lib/python3.10/site-packages/datasets/iterable_dataset.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/datasets/keyhash.py ADDED
@@ -0,0 +1,104 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+
17
+ """
18
+ Hashing function for dataset keys using `hashlib.md5`
19
+
20
+ Requirements for the hash function:
21
+
22
+ - Provides a uniformly distributed hash from random space
23
+ - Adequately fast speed
24
+ - Working with multiple input types (in this case, `str`, `int` or `bytes`)
25
+ - Should be platform independent (generates same hash on different OS and systems)
26
+
27
+ The hashing function provides a unique 128-bit integer hash of the key provided.
28
+
29
+ The split name is being used here as the hash salt to avoid having same hashes
30
+ in different splits due to same keys
31
+ """
32
+
33
+ from typing import Union
34
+
35
+ from huggingface_hub.utils import insecure_hashlib
36
+
37
+
38
+ def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
39
+ """
40
+ Returns the input hash_data in its bytes form
41
+
42
+ Args:
43
+ hash_data: the hash salt/key to be converted to bytes
44
+ """
45
+ if isinstance(hash_data, bytes):
46
+ # Data is already in bytes, return it as is
47
+ return hash_data
48
+ elif isinstance(hash_data, str):
49
+ # Keep the string as is; it will be encoded to UTF-8 below
50
+ # However replace `\\` with `/` for Windows compatibility
51
+ hash_data = hash_data.replace("\\", "/")
52
+ elif isinstance(hash_data, int):
53
+ hash_data = str(hash_data)
54
+ else:
55
+ # If data is not of the required type, raise error
56
+ raise InvalidKeyError(hash_data)
57
+
58
+ return hash_data.encode("utf-8")
59
+
60
+
61
+ class InvalidKeyError(Exception):
62
+ """Raised when a given key has an invalid datatype."""
63
+
64
+ def __init__(self, hash_data):
65
+ self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
66
+ self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
67
+ self.suffix = "\nKeys should be either str, int or bytes type"
68
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
69
+
70
+
71
+ class DuplicatedKeysError(Exception):
72
+ """Raised when a duplicate key is found."""
73
+
74
+ def __init__(self, key, duplicate_key_indices, fix_msg=""):
75
+ self.key = key
76
+ self.duplicate_key_indices = duplicate_key_indices
77
+ self.fix_msg = fix_msg
78
+ self.prefix = "Found multiple examples generated with the same key"
79
+ if len(duplicate_key_indices) <= 20:
80
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
81
+ else:
82
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
83
+ self.suffix = "\n" + fix_msg if fix_msg else ""
84
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
85
+
86
+
87
+ class KeyHasher:
88
+ """KeyHasher class for providing hash using md5"""
89
+
90
+ def __init__(self, hash_salt: str):
91
+ self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
92
+
93
+ def hash(self, key: Union[str, int, bytes]) -> int:
94
+ """Returns 128-bits unique hash of input key
95
+
96
+ Args:
97
+ key: the input key to be hashed (should be str, int or bytes)
98
+
99
+ Returns: 128-bit int hash key"""
100
+ md5 = self._split_md5.copy()
101
+ byte_key = _as_bytes(key)
102
+ md5.update(byte_key)
103
+ # Convert to integer with hexadecimal conversion
104
+ return int(md5.hexdigest(), 16)
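A minimal usage sketch for the `KeyHasher` defined above, assuming the module is importable as `datasets.keyhash`; the salt and keys are made up.

```py
from datasets.keyhash import KeyHasher

hasher = KeyHasher(hash_salt="train")  # the split name is typically used as the salt
key_hash = hasher.hash("example-0")

assert key_hash == hasher.hash("example-0")  # deterministic across runs and platforms
assert 0 <= key_hash < 2**128                # 128-bit integer derived from the MD5 digest
```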
venv/lib/python3.10/site-packages/datasets/load.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/datasets/metric.py ADDED
@@ -0,0 +1,652 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Metrics base class."""
17
+
18
+ import os
19
+ import types
20
+ import uuid
21
+ from typing import Any, Dict, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import pyarrow as pa
25
+ from filelock import BaseFileLock, Timeout
26
+
27
+ from . import config
28
+ from .arrow_dataset import Dataset
29
+ from .arrow_reader import ArrowReader
30
+ from .arrow_writer import ArrowWriter
31
+ from .download.download_config import DownloadConfig
32
+ from .download.download_manager import DownloadManager
33
+ from .features import Features
34
+ from .info import DatasetInfo, MetricInfo
35
+ from .naming import camelcase_to_snakecase
36
+ from .utils._filelock import FileLock
37
+ from .utils.deprecation_utils import deprecated
38
+ from .utils.logging import get_logger
39
+ from .utils.py_utils import copyfunc, temp_seed
40
+
41
+
42
+ logger = get_logger(__name__)
43
+
44
+
45
+ class FileFreeLock(BaseFileLock):
46
+ """Thread lock until a file **cannot** be locked"""
47
+
48
+ def __init__(self, lock_file, *args, **kwargs):
49
+ self.filelock = FileLock(lock_file)
50
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
51
+
52
+ def _acquire(self):
53
+ try:
54
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
55
+ except Timeout:
56
+ # We couldn't acquire the lock, the file is locked!
57
+ self._context.lock_file_fd = self.filelock.lock_file
58
+ else:
59
+ # We were able to acquire the lock, the file is not yet locked!
60
+ self.filelock.release()
61
+ self._context.lock_file_fd = None
62
+
63
+ def _release(self):
64
+ self._context.lock_file_fd = None
65
+
66
+
67
+ # lists - summarize long lists similarly to NumPy
68
+ # arrays/tensors - let the frameworks control formatting
69
+ def summarize_if_long_list(obj):
70
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
71
+ return f"{obj}"
72
+
73
+ def format_chunk(chunk):
74
+ return ", ".join(repr(x) for x in chunk)
75
+
76
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
77
+
78
+
79
+ class MetricInfoMixin:
80
+ """This base class exposes some attributes of MetricInfo
81
+ at the base level of the Metric for easy access.
82
+
83
+ <Deprecated version="2.5.0">
84
+
85
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
86
+
87
+ </Deprecated>
88
+
89
+ """
90
+
91
+ def __init__(self, info: MetricInfo):
92
+ self._metric_info = info
93
+
94
+ @property
95
+ def info(self):
96
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
97
+ return self._metric_info
98
+
99
+ @property
100
+ def name(self) -> str:
101
+ return self._metric_info.metric_name
102
+
103
+ @property
104
+ def experiment_id(self) -> Optional[str]:
105
+ return self._metric_info.experiment_id
106
+
107
+ @property
108
+ def description(self) -> str:
109
+ return self._metric_info.description
110
+
111
+ @property
112
+ def citation(self) -> str:
113
+ return self._metric_info.citation
114
+
115
+ @property
116
+ def features(self) -> Features:
117
+ return self._metric_info.features
118
+
119
+ @property
120
+ def inputs_description(self) -> str:
121
+ return self._metric_info.inputs_description
122
+
123
+ @property
124
+ def homepage(self) -> Optional[str]:
125
+ return self._metric_info.homepage
126
+
127
+ @property
128
+ def license(self) -> str:
129
+ return self._metric_info.license
130
+
131
+ @property
132
+ def codebase_urls(self) -> Optional[List[str]]:
133
+ return self._metric_info.codebase_urls
134
+
135
+ @property
136
+ def reference_urls(self) -> Optional[List[str]]:
137
+ return self._metric_info.reference_urls
138
+
139
+ @property
140
+ def streamable(self) -> bool:
141
+ return self._metric_info.streamable
142
+
143
+ @property
144
+ def format(self) -> Optional[str]:
145
+ return self._metric_info.format
146
+
147
+
148
+ class Metric(MetricInfoMixin):
149
+ """A Metric is the base class and common API for all metrics.
150
+
151
+ <Deprecated version="2.5.0">
152
+
153
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
154
+
155
+ </Deprecated>
156
+
157
+ Args:
158
+ config_name (``str``): This is used to define a hash specific to a metric computation script and prevents the metric's data
159
+ from being overridden when the metric loading script is modified.
160
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
161
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
162
+ The data directory should be located on a shared file-system in distributed setups.
163
+ num_process (``int``): specify the total number of nodes in a distributed setting.
164
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
165
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
166
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
167
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
168
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
169
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
170
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
171
+ timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
172
+ """
173
+
174
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
175
+ def __init__(
176
+ self,
177
+ config_name: Optional[str] = None,
178
+ keep_in_memory: bool = False,
179
+ cache_dir: Optional[str] = None,
180
+ num_process: int = 1,
181
+ process_id: int = 0,
182
+ seed: Optional[int] = None,
183
+ experiment_id: Optional[str] = None,
184
+ max_concurrent_cache_files: int = 10000,
185
+ timeout: Union[int, float] = 100,
186
+ **kwargs,
187
+ ):
188
+ # prepare info
189
+ self.config_name = config_name or "default"
190
+ info = self._info()
191
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
192
+ info.config_name = self.config_name
193
+ info.experiment_id = experiment_id or "default_experiment"
194
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
195
+
196
+ # Safety checks on num_process and process_id
197
+ if not isinstance(process_id, int) or process_id < 0:
198
+ raise ValueError("'process_id' should be a number greater than or equal to 0")
199
+ if not isinstance(num_process, int) or num_process <= process_id:
200
+ raise ValueError("'num_process' should be a number greater than process_id")
201
+ if keep_in_memory and num_process != 1:
202
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
203
+
204
+ self.num_process = num_process
205
+ self.process_id = process_id
206
+ self.max_concurrent_cache_files = max_concurrent_cache_files
207
+
208
+ self.keep_in_memory = keep_in_memory
209
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
210
+ self.data_dir = self._build_data_dir()
211
+ if seed is None:
212
+ _, seed, pos, *_ = np.random.get_state()
213
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
214
+ else:
215
+ self.seed: int = seed
216
+ self.timeout: Union[int, float] = timeout
217
+
218
+ # Update 'compute' and 'add' docstring
219
+ # methods need to be copied otherwise it changes the docstrings of every instance
220
+ self.compute = types.MethodType(copyfunc(self.compute), self)
221
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
222
+ self.add = types.MethodType(copyfunc(self.add), self)
223
+ self.compute.__func__.__doc__ += self.info.inputs_description
224
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
225
+ self.add.__func__.__doc__ += self.info.inputs_description
226
+
227
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
228
+ self.buf_writer = None
229
+ self.writer = None
230
+ self.writer_batch_size = None
231
+ self.data = None
232
+
233
+ # This is the cache file we store our predictions/references in
234
+ # Keep it None for now so we can (cloud)pickle the object
235
+ self.cache_file_name = None
236
+ self.filelock = None
237
+ self.rendez_vous_lock = None
238
+
239
+ # This is all the cache files on which we have a lock when we are in a distributed setting
240
+ self.file_paths = None
241
+ self.filelocks = None
242
+
243
+ def __len__(self):
244
+ """Return the number of examples (predictions or predictions/references pair)
245
+ currently stored in the metric's cache.
246
+ """
247
+ return 0 if self.writer is None else len(self.writer)
248
+
249
+ def __repr__(self):
250
+ return (
251
+ f'Metric(name: "{self.name}", features: {self.features}, '
252
+ f'usage: """{self.inputs_description}""", '
253
+ f"stored examples: {len(self)})"
254
+ )
255
+
256
+ def _build_data_dir(self):
257
+ """Path of this metric in cache_dir:
258
+ Will be:
259
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
260
+ If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
261
+ """
262
+ builder_data_dir = self._data_dir_root
263
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
264
+ os.makedirs(builder_data_dir, exist_ok=True)
265
+ return builder_data_dir
266
+
267
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
268
+ """Create a new cache file. If the default cache file is used, we generated a new hash."""
269
+ """Create a new cache file. If the default cache file is used, we generate a new hash."""
270
+ filelock = None
271
+ for i in range(self.max_concurrent_cache_files):
272
+ filelock = FileLock(file_path + ".lock")
273
+ try:
274
+ filelock.acquire(timeout=timeout)
275
+ except Timeout:
276
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
277
+ # We raise an error
278
+ if self.num_process != 1:
279
+ raise ValueError(
280
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
281
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
282
+ f"between distributed metric instances."
283
+ ) from None
284
+ if i == self.max_concurrent_cache_files - 1:
285
+ raise ValueError(
286
+ f"Cannot acquire lock, too many metric instances are operating concurrently on this file system. "
287
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
288
+ f"(current value is {self.max_concurrent_cache_files})."
289
+ ) from None
290
+ # In other cases (allowed to find a new file name + not yet at the max number of attempts) we can try to sample a new hashing name.
291
+ file_uuid = str(uuid.uuid4())
292
+ file_path = os.path.join(
293
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
294
+ )
295
+ else:
296
+ break
297
+
298
+ return file_path, filelock
299
+
300
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
301
+ """Get a lock on all the cache files in a distributed setup.
302
+ We wait up to `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
303
+ """
304
+ if self.num_process == 1:
305
+ if self.cache_file_name is None:
306
+ raise ValueError(
307
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
308
+ "at least once before calling `compute`."
309
+ )
310
+ file_paths = [self.cache_file_name]
311
+ else:
312
+ file_paths = [
313
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
314
+ for process_id in range(self.num_process)
315
+ ]
316
+
317
+ # Let's acquire a lock on each process files to be sure they are finished writing
318
+ filelocks = []
319
+ for process_id, file_path in enumerate(file_paths):
320
+ if process_id == 0: # process 0 already has its lock file
321
+ filelocks.append(self.filelock)
322
+ else:
323
+ filelock = FileLock(file_path + ".lock")
324
+ try:
325
+ filelock.acquire(timeout=self.timeout)
326
+ except Timeout:
327
+ raise ValueError(
328
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
329
+ ) from None
330
+ else:
331
+ filelocks.append(filelock)
332
+
333
+ return file_paths, filelocks
334
+
335
+ def _check_all_processes_locks(self):
336
+ expected_lock_file_names = [
337
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
338
+ for process_id in range(self.num_process)
339
+ ]
340
+ for expected_lock_file_name in expected_lock_file_names:
341
+ nofilelock = FileFreeLock(expected_lock_file_name)
342
+ try:
343
+ nofilelock.acquire(timeout=self.timeout)
344
+ except Timeout:
345
+ raise ValueError(
346
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
347
+ ) from None
348
+ else:
349
+ nofilelock.release()
350
+
351
+ def _check_rendez_vous(self):
352
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
353
+ nofilelock = FileFreeLock(expected_lock_file_name)
354
+ try:
355
+ nofilelock.acquire(timeout=self.timeout)
356
+ except Timeout:
357
+ raise ValueError(
358
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
359
+ ) from None
360
+ else:
361
+ nofilelock.release()
362
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
363
+ rendez_vous_lock = FileLock(lock_file_name)
364
+ try:
365
+ rendez_vous_lock.acquire(timeout=self.timeout)
366
+ except Timeout:
367
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
368
+ else:
369
+ rendez_vous_lock.release()
370
+
371
+ def _finalize(self):
372
+ """Close all the writing processes and load/gather the data
373
+ from all the nodes if main node or all_process is True.
374
+ """
375
+ if self.writer is not None:
376
+ self.writer.finalize()
377
+ self.writer = None
378
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
379
+ if self.filelock is not None and self.process_id > 0:
380
+ self.filelock.release()
381
+
382
+ if self.keep_in_memory:
383
+ # Read the predictions and references
384
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
385
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
386
+
387
+ elif self.process_id == 0:
388
+ # Let's acquire a lock on each node files to be sure they are finished writing
389
+ file_paths, filelocks = self._get_all_cache_files()
390
+
391
+ # Read the predictions and references
392
+ try:
393
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
394
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
395
+ except FileNotFoundError:
396
+ raise ValueError(
397
+ "Error in finalize: another metric instance is already using the local cache file. "
398
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
399
+ ) from None
400
+
401
+ # Store file paths and locks and we will release/delete them after the computation.
402
+ self.file_paths = file_paths
403
+ self.filelocks = filelocks
404
+
405
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
406
+ """Compute the metrics.
407
+
408
+ Usage of positional arguments is not allowed to prevent mistakes.
409
+
410
+ Args:
411
+ predictions (list/array/tensor, optional): Predictions.
412
+ references (list/array/tensor, optional): References.
413
+ **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
414
+ method (see details in the docstring).
415
+
416
+ Return:
417
+ dict or None
418
+
419
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
420
+ - None if the metric is not run on the main process (``process_id != 0``).
421
+
422
+ Example:
423
+
424
+ ```py
425
+ >>> from datasets import load_metric
426
+ >>> metric = load_metric("accuracy")
427
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
428
+ ```
429
+ """
430
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
431
+ if predictions is None and references is None:
432
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
433
+ all_kwargs.update(missing_kwargs)
434
+ else:
435
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
436
+ if missing_inputs:
437
+ raise ValueError(
438
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
439
+ )
440
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
441
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
442
+
443
+ if any(v is not None for v in inputs.values()):
444
+ self.add_batch(**inputs)
445
+ self._finalize()
446
+
447
+ self.cache_file_name = None
448
+ self.filelock = None
449
+
450
+ if self.process_id == 0:
451
+ self.data.set_format(type=self.info.format)
452
+
453
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
454
+ with temp_seed(self.seed):
455
+ output = self._compute(**inputs, **compute_kwargs)
456
+
457
+ if self.buf_writer is not None:
458
+ self.buf_writer = None
459
+ del self.data
460
+ self.data = None
461
+ else:
462
+ # Release locks and delete all the cache files. Process 0 is released last.
463
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
464
+ logger.info(f"Removing {file_path}")
465
+ del self.data
466
+ self.data = None
467
+ del self.writer
468
+ self.writer = None
469
+ os.remove(file_path)
470
+ filelock.release()
471
+
472
+ return output
473
+ else:
474
+ return None
475
+
476
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
477
+ """Add a batch of predictions and references for the metric's stack.
478
+
479
+ Args:
480
+ predictions (list/array/tensor, optional): Predictions.
481
+ references (list/array/tensor, optional): References.
482
+
483
+ Example:
484
+
485
+ ```py
486
+ >>> from datasets import load_metric
487
+ >>> metric = load_metric("accuracy")
488
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
489
+ ```
490
+ """
491
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
492
+ if bad_inputs:
493
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
494
+ batch = {"predictions": predictions, "references": references, **kwargs}
495
+ batch = {input_name: batch[input_name] for input_name in self.features}
496
+ batch = self.info.features.encode_batch(batch)
497
+ if self.writer is None:
498
+ self._init_writer()
499
+ try:
500
+ self.writer.write_batch(batch)
501
+ except pa.ArrowInvalid:
502
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
503
+ col0 = next(iter(batch))
504
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
505
+ error_msg = (
506
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
507
+ )
508
+ elif sorted(self.features) != ["references", "predictions"]:
509
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
510
+ error_msg_inputs = ",\n".join(
511
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
512
+ )
513
+ error_msg += error_msg_inputs
514
+ else:
515
+ error_msg = (
516
+ f"Predictions and/or references don't match the expected format.\n"
517
+ f"Expected format: {self.features},\n"
518
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
519
+ f"Input references: {summarize_if_long_list(references)}"
520
+ )
521
+ raise ValueError(error_msg) from None
522
+
523
+ def add(self, *, prediction=None, reference=None, **kwargs):
524
+ """Add one prediction and reference for the metric's stack.
525
+
526
+ Args:
527
+ prediction (list/array/tensor, optional): Predictions.
528
+ reference (list/array/tensor, optional): References.
529
+
530
+ Example:
531
+
532
+ ```py
533
+ >>> from datasets import load_metric
534
+ >>> metric = load_metric("accuracy")
535
+ >>> metric.add(prediction=model_prediction, reference=label)
536
+ ```
537
+ """
538
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
539
+ if bad_inputs:
540
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
541
+ example = {"predictions": prediction, "references": reference, **kwargs}
542
+ example = {input_name: example[input_name] for input_name in self.features}
543
+ example = self.info.features.encode_example(example)
544
+ if self.writer is None:
545
+ self._init_writer()
546
+ try:
547
+ self.writer.write(example)
548
+ except pa.ArrowInvalid:
549
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
550
+ error_msg_inputs = ",\n".join(
551
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
552
+ )
553
+ error_msg += error_msg_inputs
554
+ raise ValueError(error_msg) from None
555
+
556
+ def _init_writer(self, timeout=1):
557
+ if self.num_process > 1:
558
+ if self.process_id == 0:
559
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
560
+ self.rendez_vous_lock = FileLock(file_path)
561
+ try:
562
+ self.rendez_vous_lock.acquire(timeout=timeout)
563
+ except TimeoutError:
564
+ raise ValueError(
565
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
566
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
567
+ f"between distributed metric instances."
568
+ ) from None
569
+
570
+ if self.keep_in_memory:
571
+ self.buf_writer = pa.BufferOutputStream()
572
+ self.writer = ArrowWriter(
573
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
574
+ )
575
+ else:
576
+ self.buf_writer = None
577
+
578
+ # Get cache file name and lock it
579
+ if self.cache_file_name is None or self.filelock is None:
580
+ cache_file_name, filelock = self._create_cache_file() # get ready
581
+ self.cache_file_name = cache_file_name
582
+ self.filelock = filelock
583
+
584
+ self.writer = ArrowWriter(
585
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
586
+ )
587
+ # Set up the rendez-vous here if we are in a distributed setting
588
+ if self.num_process > 1:
589
+ if self.process_id == 0:
590
+ self._check_all_processes_locks() # wait for everyone to be ready
591
+ self.rendez_vous_lock.release() # let everyone go
592
+ else:
593
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
594
+
595
+ def _info(self) -> MetricInfo:
596
+ """Construct the MetricInfo object. See `MetricInfo` for details.
597
+
598
+ Warning: This function is only called once and the result is cached for all
599
+ following .info() calls.
600
+
601
+ Returns:
602
+ info: (MetricInfo) The metrics information
603
+ """
604
+ raise NotImplementedError
605
+
606
+ def download_and_prepare(
607
+ self,
608
+ download_config: Optional[DownloadConfig] = None,
609
+ dl_manager: Optional[DownloadManager] = None,
610
+ ):
611
+ """Downloads and prepares dataset for reading.
612
+
613
+ Args:
614
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
615
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
616
+ """
617
+ if dl_manager is None:
618
+ if download_config is None:
619
+ download_config = DownloadConfig()
620
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
621
+ download_config.force_download = False
622
+
623
+ dl_manager = DownloadManager(
624
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
625
+ )
626
+
627
+ self._download_and_prepare(dl_manager)
628
+
629
+ def _download_and_prepare(self, dl_manager):
630
+ """Downloads and prepares resources for the metric.
631
+
632
+ This is the internal implementation to overwrite called when user calls
633
+ `download_and_prepare`. It should download all required resources for the metric.
634
+
635
+ Args:
636
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
637
+ """
638
+ return None
639
+
640
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
641
+ """This method defines the common API for all the metrics in the library"""
642
+ raise NotImplementedError
643
+
644
+ def __del__(self):
645
+ if hasattr(self, "filelock") and self.filelock is not None:
646
+ self.filelock.release()
647
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
648
+ self.rendez_vous_lock.release()
649
+ if hasattr(self, "writer"): # in case it was already deleted
650
+ del self.writer
651
+ if hasattr(self, "data"): # in case it was already deleted
652
+ del self.data
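The `Metric` base class above is deprecated in favor of 🤗 Evaluate, but for completeness here is a rough sketch of how a subclass plugs into it. The `ToyExactMatch` class and its inputs are invented for illustration; only `_info` and `_compute` are required overrides.

```py
import datasets


class ToyExactMatch(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description="Toy exact-match metric (illustrative only)",
            citation="",
            features=datasets.Features(
                {"predictions": datasets.Value("string"), "references": datasets.Value("string")}
            ),
        )

    def _compute(self, predictions, references):
        # Fraction of predictions that exactly match their reference
        matches = sum(p == r for p, r in zip(predictions, references))
        return {"exact_match": matches / len(references)}


metric = ToyExactMatch()  # emits a FutureWarning pointing to the 🤗 Evaluate library
metric.add_batch(predictions=["a", "b"], references=["a", "c"])
print(metric.compute())  # {'exact_match': 0.5}
```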
venv/lib/python3.10/site-packages/datasets/naming.py ADDED
@@ -0,0 +1,84 @@
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """Utilities for file names."""
17
+
18
+ import itertools
19
+ import os
20
+ import re
21
+
22
+
23
+ _uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
24
+ _lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
25
+
26
+ _single_underscore_re = re.compile(r"(?<!_)_(?!_)")
27
+ _multiple_underscores_re = re.compile(r"(_{2,})")
28
+
29
+ _split_re = r"^\w+(\.\w+)*$"
30
+
31
+ INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
32
+
33
+
34
+ def camelcase_to_snakecase(name):
35
+ """Convert camel-case string to snake-case."""
36
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
37
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
38
+ return name.lower()
39
+
40
+
41
+ def snakecase_to_camelcase(name):
42
+ """Convert snake-case string to camel-case string."""
43
+ name = _single_underscore_re.split(name)
44
+ name = [_multiple_underscores_re.split(n) for n in name]
45
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
46
+
47
+
48
+ def filename_prefix_for_name(name):
49
+ if os.path.basename(name) != name:
50
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
51
+ return camelcase_to_snakecase(name)
52
+
53
+
54
+ def filename_prefix_for_split(name, split):
55
+ if os.path.basename(name) != name:
56
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
57
+ if not re.match(_split_re, split):
58
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
59
+ return f"{filename_prefix_for_name(name)}-{split}"
60
+
61
+
62
+ def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
63
+ prefix = filename_prefix_for_split(dataset_name, split)
64
+ if filetype_suffix:
65
+ prefix += f".{filetype_suffix}"
66
+ filepath = os.path.join(data_dir, prefix)
67
+ return f"{filepath}*"
68
+
69
+
70
+ def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
71
+ prefix = filename_prefix_for_split(dataset_name, split)
72
+ prefix = os.path.join(path, prefix)
73
+
74
+ if shard_lengths:
75
+ num_shards = len(shard_lengths)
76
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
77
+ if filetype_suffix:
78
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
79
+ return filenames
80
+ else:
81
+ filename = prefix
82
+ if filetype_suffix:
83
+ filename += f".{filetype_suffix}"
84
+ return [filename]
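Illustrative calls against the naming helpers above; the cache path and dataset name are made up.

```py
from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split

camelcase_to_snakecase("MyDatasetV2")  # -> 'my_dataset_v2'

filenames_for_dataset_split(
    path="/tmp/cache/my_dataset/default/0.0.0",  # hypothetical cache directory
    dataset_name="my_dataset",
    split="train",
    filetype_suffix="arrow",
    shard_lengths=[1000, 1000],  # two shards -> two numbered file names
)
# -> ['/tmp/cache/my_dataset/default/0.0.0/my_dataset-train-00000-of-00002.arrow',
#     '/tmp/cache/my_dataset/default/0.0.0/my_dataset-train-00001-of-00002.arrow']
```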
venv/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py ADDED
@@ -0,0 +1,71 @@
+ import inspect
+ import re
+ from typing import Dict, List, Tuple
+
+ from huggingface_hub.utils import insecure_hashlib
+
+ from .arrow import arrow
+ from .audiofolder import audiofolder
+ from .cache import cache  # noqa F401
+ from .csv import csv
+ from .imagefolder import imagefolder
+ from .json import json
+ from .pandas import pandas
+ from .parquet import parquet
+ from .sql import sql  # noqa F401
+ from .text import text
+ from .webdataset import webdataset
+
+
+ def _hash_python_lines(lines: List[str]) -> str:
+     filtered_lines = []
+     for line in lines:
+         line = re.sub(r"#.*", "", line)  # remove comments
+         if line:
+             filtered_lines.append(line)
+     full_str = "\n".join(filtered_lines)
+
+     # Make a hash from all this code
+     full_bytes = full_str.encode("utf-8")
+     return insecure_hashlib.sha256(full_bytes).hexdigest()
+
+
+ # get importable module names and hash for caching
+ _PACKAGED_DATASETS_MODULES = {
+     "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
+     "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
+     "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
+     "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
+     "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
+     "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
+     "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
+     "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
+     "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
+ }
+
+ # Used to infer the module to use based on the data files extensions
+ _EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
+     ".csv": ("csv", {}),
+     ".tsv": ("csv", {"sep": "\t"}),
+     ".json": ("json", {}),
+     ".jsonl": ("json", {}),
+     ".parquet": ("parquet", {}),
+     ".geoparquet": ("parquet", {}),
+     ".gpq": ("parquet", {}),
+     ".arrow": ("arrow", {}),
+     ".txt": ("text", {}),
+     ".tar": ("webdataset", {}),
+ }
+ _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+ _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+ _MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
+
+ # Used to filter data files based on extensions given a module name
+ _MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
+ for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
+     _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
+
+ for _module in _MODULE_TO_EXTENSIONS:
+     _MODULE_TO_EXTENSIONS[_module].append(".zip")
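
As context for the tables above, a minimal sketch of how `_EXTENSION_TO_MODULE` can be used to route a data file to a packaged builder by its extension. The `infer_module_for_file` helper and the file names are hypothetical, not part of `datasets`:

```python
import os
from typing import Optional, Tuple

from datasets.packaged_modules import _EXTENSION_TO_MODULE


def infer_module_for_file(filename: str) -> Optional[Tuple[str, dict]]:
    """Hypothetical helper: map a file to (builder module name, default builder kwargs)."""
    _, ext = os.path.splitext(filename)
    # The table is keyed on lowercase extensions, plus uppercase variants for image/audio formats.
    return _EXTENSION_TO_MODULE.get(ext) or _EXTENSION_TO_MODULE.get(ext.lower())


print(infer_module_for_file("data/train.tsv"))        # ('csv', {'sep': '\t'})
print(infer_module_for_file("shards/part-0001.tar"))  # ('webdataset', {})
```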
venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (199 Bytes).
 
venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc ADDED
Binary file (3.06 kB).
 
venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py ADDED
@@ -0,0 +1,74 @@
+ import itertools
+ from dataclasses import dataclass
+ from typing import Optional
+
+ import pyarrow as pa
+
+ import datasets
+ from datasets.table import table_cast
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ @dataclass
+ class ArrowConfig(datasets.BuilderConfig):
+     """BuilderConfig for Arrow."""
+
+     features: Optional[datasets.Features] = None
+
+
+ class Arrow(datasets.ArrowBasedBuilder):
+     BUILDER_CONFIG_CLASS = ArrowConfig
+
+     def _info(self):
+         return datasets.DatasetInfo(features=self.config.features)
+
+     def _split_generators(self, dl_manager):
+         """We handle string, list and dicts in datafiles"""
+         if not self.config.data_files:
+             raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+         dl_manager.download_config.extract_on_the_fly = True
+         data_files = dl_manager.download_and_extract(self.config.data_files)
+         if isinstance(data_files, (str, list, tuple)):
+             files = data_files
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+         splits = []
+         for split_name, files in data_files.items():
+             if isinstance(files, str):
+                 files = [files]
+             # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+             files = [dl_manager.iter_files(file) for file in files]
+             # Infer features if they are stored in the arrow schema
+             if self.info.features is None:
+                 for file in itertools.chain.from_iterable(files):
+                     with open(file, "rb") as f:
+                         self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+                     break
+             splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+         return splits
+
+     def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+         if self.info.features is not None:
+             # more expensive cast to support nested features with keys in a different order
+             # allows str <-> int/float or str to Audio for example
+             pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+         return pa_table
+
+     def _generate_tables(self, files):
+         for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+             with open(file, "rb") as f:
+                 try:
+                     for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+                         pa_table = pa.Table.from_batches([record_batch])
+                         # Uncomment for debugging (will print the Arrow table size and elements)
+                         # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+                         # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+                         yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+                 except ValueError as e:
+                     logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+                     raise
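
A standalone sketch of the read pattern that `_generate_tables` relies on: iterate over the record batches of an Arrow IPC stream with `pa.ipc.open_stream` and wrap each batch in a table. The file name and column values below are made up:

```python
import pyarrow as pa

# Write a tiny Arrow IPC stream file ("demo.arrow" and its contents are invented).
table = pa.table({"text": ["a", "b", "c"], "label": [0, 1, 0]})
with pa.OSFile("demo.arrow", "wb") as sink:
    with pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)

# Read it back one record batch at a time, mirroring Arrow._generate_tables above.
with open("demo.arrow", "rb") as f:
    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
        pa_table = pa.Table.from_batches([record_batch])
        print(batch_idx, pa_table.num_rows, pa_table.schema.names)
```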
venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes).
 
venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc ADDED
Binary file (1.35 kB).
 
venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py ADDED
@@ -0,0 +1,68 @@
+ from typing import List
+
+ import datasets
+ from datasets.tasks import AudioClassification
+
+ from ..folder_based_builder import folder_based_builder
+
+
+ logger = datasets.utils.logging.get_logger(__name__)
+
+
+ class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+     """Builder Config for AudioFolder."""
+
+     drop_labels: bool = None
+     drop_metadata: bool = None
+
+
+ class AudioFolder(folder_based_builder.FolderBasedBuilder):
+     BASE_FEATURE = datasets.Audio
+     BASE_COLUMN_NAME = "audio"
+     BUILDER_CONFIG_CLASS = AudioFolderConfig
+     EXTENSIONS: List[str]  # definition at the bottom of the script
+     CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+ # Obtained with:
+ # ```
+ # import soundfile as sf
+ #
+ # AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+ #
+ # # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+ # AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+ # ```
+ # We intentionally do not run this code on launch because:
+ # (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+ # (2) To ensure the list of supported extensions is deterministic
+ AUDIO_EXTENSIONS = [
+     ".aiff",
+     ".au",
+     ".avr",
+     ".caf",
+     ".flac",
+     ".htk",
+     ".svx",
+     ".mat4",
+     ".mat5",
+     ".mpc2k",
+     ".ogg",
+     ".paf",
+     ".pvf",
+     ".raw",
+     ".rf64",
+     ".sd2",
+     ".sds",
+     ".ircam",
+     ".voc",
+     ".w64",
+     ".wav",
+     ".nist",
+     ".wavex",
+     ".wve",
+     ".xi",
+     ".mp3",
+     ".opus",
+ ]
+ AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
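
As a small illustration of how this list is consumed, `packaged_modules/__init__.py` above registers every entry of `AudioFolder.EXTENSIONS` (and its uppercase variant) so that files can be routed to the `audiofolder` builder by suffix. The sketch below only filters some invented file names against the list:

```python
from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder

# Invented file names; only their suffixes matter here.
candidates = ["clip_001.wav", "clip_002.FLAC", "notes.txt", "clip_003.opus"]

audio_files = [
    name
    for name in candidates
    if any(name.lower().endswith(ext) for ext in AudioFolder.EXTENSIONS)
]
print(audio_files)  # ['clip_001.wav', 'clip_002.FLAC', 'clip_003.opus']
```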
venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes).
 
venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc ADDED
Binary file (1.69 kB).