Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- .gitattributes +2 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__init__.py +20 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/audio.py +277 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/features.py +2202 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/image.py +383 -0
- llmeval-env/lib/python3.10/site-packages/datasets/features/translation.py +129 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py +74 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py +68 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py +285 -0
- llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py +299 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__init__.py +27 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py +96 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/_dill.py +459 -0
- llmeval-env/lib/python3.10/site-packages/datasets/utils/_filelock.py +59 -0
.gitattributes
CHANGED
@@ -62,3 +62,5 @@ llmeval-env/bin/python3 filter=lfs diff=lfs merge=lfs -text
 llmeval-env/bin/python filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc
ADDED
Binary file (232 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc
ADDED
Binary file (24.4 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc
ADDED
Binary file (78 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc
ADDED
Binary file (6.79 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc
ADDED
Binary file (1.7 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc
ADDED
Binary file (17.7 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc
ADDED
Binary file (23.5 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc
ADDED
Binary file (3.43 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc
ADDED
Binary file (86.4 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc
ADDED
Binary file (33.5 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc
ADDED
Binary file (4.94 kB).
llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc
ADDED
Binary file (75.5 kB).
llmeval-env/lib/python3.10/site-packages/datasets/features/__init__.py
ADDED
@@ -0,0 +1,20 @@
+# ruff: noqa
+
+__all__ = [
+    "Audio",
+    "Array2D",
+    "Array3D",
+    "Array4D",
+    "Array5D",
+    "ClassLabel",
+    "Features",
+    "Sequence",
+    "Value",
+    "Image",
+    "Translation",
+    "TranslationVariableLanguages",
+]
+from .audio import Audio
+from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
+from .image import Image
+from .translation import Translation, TranslationVariableLanguages
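These re-exports are what user code typically combines when declaring a dataset schema. A minimal sketch of that usage, assuming the standard top-level `datasets` re-exports of the same classes (the column names are hypothetical):

```py
from datasets import Audio, ClassLabel, Features, Sequence, Value

# declare column types for a hypothetical speech-classification dataset
features = Features(
    {
        "audio": Audio(sampling_rate=16_000),
        "label": ClassLabel(names=["negative", "positive"]),
        "tokens": Sequence(Value("string")),
    }
)
print(features["audio"].pa_type)  # struct<bytes: binary, path: string>
```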
llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (596 Bytes).
llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc
ADDED
Binary file (10.3 kB).
llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc
ADDED
Binary file (75.8 kB).
llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc
ADDED
Binary file (12.5 kB).
llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc
ADDED
Binary file (5.2 kB).
llmeval-env/lib/python3.10/site-packages/datasets/features/audio.py
ADDED
@@ -0,0 +1,277 @@
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..table import array_cast
+from ..utils.file_utils import xopen, xsplitext
+from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+    from .features import FeatureType
+
+
+@dataclass
+class Audio:
+    """Audio [`Feature`] to extract audio data from an audio file.
+
+    Input: The Audio feature accepts as input:
+    - A `str`: Absolute path to the audio file (i.e. random access is allowed).
+    - A `dict` with the keys:
+
+        - `path`: String with relative path of the audio file to the archive file.
+        - `bytes`: Bytes content of the audio file.
+
+      This is useful for archived files with sequential access.
+
+    - A `dict` with the keys:
+
+        - `path`: String with relative path of the audio file to the archive file.
+        - `array`: Array containing the audio sample
+        - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
+
+      This is useful for archived files with sequential access.
+
+    Args:
+        sampling_rate (`int`, *optional*):
+            Target sampling rate. If `None`, the native sampling rate is used.
+        mono (`bool`, defaults to `True`):
+            Whether to convert the audio signal to mono by averaging samples across
+            channels.
+        decode (`bool`, defaults to `True`):
+            Whether to decode the audio data. If `False`,
+            returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
+
+    Example:
+
+    ```py
+    >>> from datasets import load_dataset, Audio
+    >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
+    >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+    >>> ds[0]["audio"]
+    {'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
+             3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+     'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+     'sampling_rate': 16000}
+    ```
+    """
+
+    sampling_rate: Optional[int] = None
+    mono: bool = True
+    decode: bool = True
+    id: Optional[str] = None
+    # Automatically constructed
+    dtype: ClassVar[str] = "dict"
+    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+    _type: str = field(default="Audio", init=False, repr=False)
+
+    def __call__(self):
+        return self.pa_type
+
+    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
+        """Encode example into a format for Arrow.
+
+        Args:
+            value (`str` or `dict`):
+                Data passed as input to Audio feature.
+
+        Returns:
+            `dict`
+        """
+        try:
+            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
+        except ImportError as err:
+            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
+        if isinstance(value, str):
+            return {"bytes": None, "path": value}
+        elif isinstance(value, bytes):
+            return {"bytes": value, "path": None}
+        elif "array" in value:
+            # convert the audio array to wav bytes
+            buffer = BytesIO()
+            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
+            return {"bytes": buffer.getvalue(), "path": None}
+        elif value.get("path") is not None and os.path.isfile(value["path"]):
+            # we set "bytes": None to not duplicate the data if they're already available locally
+            if value["path"].endswith("pcm"):
+                # "PCM" only has raw audio bytes
+                if value.get("sampling_rate") is None:
+                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
+                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
+                if value.get("bytes"):
+                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
+                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
+                else:
+                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
+
+                buffer = BytesIO(bytes())
+                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
+                return {"bytes": buffer.getvalue(), "path": None}
+            else:
+                return {"bytes": None, "path": value.get("path")}
+        elif value.get("bytes") is not None or value.get("path") is not None:
+            # store the audio bytes, and path is used to infer the audio format using the file extension
+            return {"bytes": value.get("bytes"), "path": value.get("path")}
+        else:
+            raise ValueError(
+                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+            )
+
+    def decode_example(
+        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
+    ) -> dict:
+        """Decode example audio file into audio data.
+
+        Args:
+            value (`dict`):
+                A dictionary with keys:
+
+                - `path`: String with relative audio file path.
+                - `bytes`: Bytes of the audio file.
+            token_per_repo_id (`dict`, *optional*):
+                To access and decode
+                audio files from private repositories on the Hub, you can pass
+                a dictionary repo_id (`str`) -> token (`bool` or `str`)
+
+        Returns:
+            `dict`
+        """
+        if not self.decode:
+            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
+
+        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
+        if path is None and file is None:
+            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
+
+        try:
+            import librosa
+            import soundfile as sf
+        except ImportError as err:
+            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
+
+        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
+        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
+            raise RuntimeError(
+                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
+                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+            )
+        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
+            raise RuntimeError(
+                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
+                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+            )
+
+        if file is None:
+            token_per_repo_id = token_per_repo_id or {}
+            source_url = path.split("::")[-1]
+            pattern = (
+                config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+            )
+            try:
+                repo_id = string_to_dict(source_url, pattern)["repo_id"]
+                token = token_per_repo_id[repo_id]
+            except (ValueError, KeyError):
+                token = None
+
+            download_config = DownloadConfig(token=token)
+            with xopen(path, "rb", download_config=download_config) as f:
+                array, sampling_rate = sf.read(f)
+
+        else:
+            array, sampling_rate = sf.read(file)
+
+        array = array.T
+        if self.mono:
+            array = librosa.to_mono(array)
+        if self.sampling_rate and self.sampling_rate != sampling_rate:
+            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+            sampling_rate = self.sampling_rate
+
+        return {"path": path, "array": array, "sampling_rate": sampling_rate}
+
+    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
+        from .features import Value
+
+        if self.decode:
+            raise ValueError("Cannot flatten a decoded Audio feature.")
+        return {
+            "bytes": Value("binary"),
+            "path": Value("string"),
+        }
+
+    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
+        """Cast an Arrow array to the Audio arrow storage type.
+        The Arrow types that can be converted to the Audio pyarrow storage type are:
+
+        - `pa.string()` - it must contain the "path" data
+        - `pa.binary()` - it must contain the audio bytes
+        - `pa.struct({"bytes": pa.binary()})`
+        - `pa.struct({"path": pa.string()})`
+        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+
+        Args:
+            storage (`Union[pa.StringArray, pa.StructArray]`):
+                PyArrow array to cast.
+
+        Returns:
+            `pa.StructArray`: Array in the Audio arrow storage type, that is
+                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
+        """
+        if pa.types.is_string(storage.type):
+            bytes_array = pa.array([None] * len(storage), type=pa.binary())
+            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+        elif pa.types.is_binary(storage.type):
+            path_array = pa.array([None] * len(storage), type=pa.string())
+            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
+            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
+        elif pa.types.is_struct(storage.type):
+            if storage.type.get_field_index("bytes") >= 0:
+                bytes_array = storage.field("bytes")
+            else:
+                bytes_array = pa.array([None] * len(storage), type=pa.binary())
+            if storage.type.get_field_index("path") >= 0:
+                path_array = storage.field("path")
+            else:
+                path_array = pa.array([None] * len(storage), type=pa.string())
+            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+        return array_cast(storage, self.pa_type)
+
+    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+        """Embed audio files into the Arrow array.
+
+        Args:
+            storage (`pa.StructArray`):
+                PyArrow array to embed.
+
+        Returns:
+            `pa.StructArray`: Array in the Audio arrow storage type, that is
+                `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+        """
+
+        @no_op_if_value_is_null
+        def path_to_bytes(path):
+            with xopen(path, "rb") as f:
+                bytes_ = f.read()
+            return bytes_
+
+        bytes_array = pa.array(
+            [
+                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+                for x in storage.to_pylist()
+            ],
+            type=pa.binary(),
+        )
+        path_array = pa.array(
+            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+            type=pa.string(),
+        )
+        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+        return array_cast(storage, self.pa_type)
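As a quick illustration of the encode/decode round trip implemented above, here is a minimal sketch; it assumes `soundfile` and `librosa` are installed and the values are purely illustrative:

```py
import numpy as np
from datasets import Audio

feature = Audio(sampling_rate=16_000)

# encode_example() turns an in-memory sample into the {"bytes", "path"} storage dict (WAV bytes here)
sample = {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
stored = feature.encode_example(sample)
print(stored["path"], len(stored["bytes"]))  # None <size of the WAV payload>

# decode_example() reads the bytes back into an array plus its sampling rate
decoded = feature.decode_example(stored)
print(decoded["sampling_rate"], decoded["array"].shape)  # 16000 (16000,)
```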
llmeval-env/lib/python3.10/site-packages/datasets/features/features.py
ADDED
@@ -0,0 +1,2202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
# Lint as: python3
|
16 |
+
"""This class handle features definition in datasets and some utilities to display table type."""
|
17 |
+
|
18 |
+
import copy
|
19 |
+
import json
|
20 |
+
import re
|
21 |
+
import sys
|
22 |
+
from collections.abc import Iterable, Mapping
|
23 |
+
from collections.abc import Sequence as SequenceABC
|
24 |
+
from dataclasses import InitVar, dataclass, field, fields
|
25 |
+
from functools import reduce, wraps
|
26 |
+
from operator import mul
|
27 |
+
from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
|
28 |
+
from typing import Sequence as Sequence_
|
29 |
+
|
30 |
+
import numpy as np
|
31 |
+
import pandas as pd
|
32 |
+
import pyarrow as pa
|
33 |
+
import pyarrow.compute as pc
|
34 |
+
import pyarrow.types
|
35 |
+
import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
|
36 |
+
from pandas.api.extensions import ExtensionArray as PandasExtensionArray
|
37 |
+
from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
|
38 |
+
|
39 |
+
from .. import config
|
40 |
+
from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
|
41 |
+
from ..table import array_cast
|
42 |
+
from ..utils import experimental, logging
|
43 |
+
from ..utils.py_utils import asdict, first_non_null_value, zip_dict
|
44 |
+
from .audio import Audio
|
45 |
+
from .image import Image, encode_pil_image
|
46 |
+
from .translation import Translation, TranslationVariableLanguages
|
47 |
+
|
48 |
+
|
49 |
+
logger = logging.get_logger(__name__)
|
50 |
+
|
51 |
+
|
52 |
+
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
|
53 |
+
"""
|
54 |
+
_arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
|
55 |
+
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
|
56 |
+
"""
|
57 |
+
if pyarrow.types.is_null(arrow_type):
|
58 |
+
return "null"
|
59 |
+
elif pyarrow.types.is_boolean(arrow_type):
|
60 |
+
return "bool"
|
61 |
+
elif pyarrow.types.is_int8(arrow_type):
|
62 |
+
return "int8"
|
63 |
+
elif pyarrow.types.is_int16(arrow_type):
|
64 |
+
return "int16"
|
65 |
+
elif pyarrow.types.is_int32(arrow_type):
|
66 |
+
return "int32"
|
67 |
+
elif pyarrow.types.is_int64(arrow_type):
|
68 |
+
return "int64"
|
69 |
+
elif pyarrow.types.is_uint8(arrow_type):
|
70 |
+
return "uint8"
|
71 |
+
elif pyarrow.types.is_uint16(arrow_type):
|
72 |
+
return "uint16"
|
73 |
+
elif pyarrow.types.is_uint32(arrow_type):
|
74 |
+
return "uint32"
|
75 |
+
elif pyarrow.types.is_uint64(arrow_type):
|
76 |
+
return "uint64"
|
77 |
+
elif pyarrow.types.is_float16(arrow_type):
|
78 |
+
return "float16" # pyarrow dtype is "halffloat"
|
79 |
+
elif pyarrow.types.is_float32(arrow_type):
|
80 |
+
return "float32" # pyarrow dtype is "float"
|
81 |
+
elif pyarrow.types.is_float64(arrow_type):
|
82 |
+
return "float64" # pyarrow dtype is "double"
|
83 |
+
elif pyarrow.types.is_time32(arrow_type):
|
84 |
+
return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
|
85 |
+
elif pyarrow.types.is_time64(arrow_type):
|
86 |
+
return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
|
87 |
+
elif pyarrow.types.is_timestamp(arrow_type):
|
88 |
+
if arrow_type.tz is None:
|
89 |
+
return f"timestamp[{arrow_type.unit}]"
|
90 |
+
elif arrow_type.tz:
|
91 |
+
return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
|
92 |
+
else:
|
93 |
+
raise ValueError(f"Unexpected timestamp object {arrow_type}.")
|
94 |
+
elif pyarrow.types.is_date32(arrow_type):
|
95 |
+
return "date32" # pyarrow dtype is "date32[day]"
|
96 |
+
elif pyarrow.types.is_date64(arrow_type):
|
97 |
+
return "date64" # pyarrow dtype is "date64[ms]"
|
98 |
+
elif pyarrow.types.is_duration(arrow_type):
|
99 |
+
return f"duration[{arrow_type.unit}]"
|
100 |
+
elif pyarrow.types.is_decimal128(arrow_type):
|
101 |
+
return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
|
102 |
+
elif pyarrow.types.is_decimal256(arrow_type):
|
103 |
+
return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
|
104 |
+
elif pyarrow.types.is_binary(arrow_type):
|
105 |
+
return "binary"
|
106 |
+
elif pyarrow.types.is_large_binary(arrow_type):
|
107 |
+
return "large_binary"
|
108 |
+
elif pyarrow.types.is_string(arrow_type):
|
109 |
+
return "string"
|
110 |
+
elif pyarrow.types.is_large_string(arrow_type):
|
111 |
+
return "large_string"
|
112 |
+
else:
|
113 |
+
raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
|
114 |
+
|
115 |
+
|
116 |
+
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
|
117 |
+
"""
|
118 |
+
string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
|
119 |
+
|
120 |
+
In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
|
121 |
+
|
122 |
+
This is necessary because the datasets.Value() primitive type is constructed using a string dtype
|
123 |
+
|
124 |
+
Value(dtype=str)
|
125 |
+
|
126 |
+
But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema,
|
127 |
+
which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
|
128 |
+
purpose of this function.
|
129 |
+
"""
|
130 |
+
|
131 |
+
def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
|
132 |
+
msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
|
133 |
+
if examples:
|
134 |
+
examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
|
135 |
+
msg += f"\nValid examples include: {examples}."
|
136 |
+
if urls:
|
137 |
+
urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
|
138 |
+
msg += f"\nFor more insformation, see: {urls}."
|
139 |
+
return msg
|
140 |
+
|
141 |
+
if datasets_dtype in pa.__dict__:
|
142 |
+
return pa.__dict__[datasets_dtype]()
|
143 |
+
|
144 |
+
if (datasets_dtype + "_") in pa.__dict__:
|
145 |
+
return pa.__dict__[datasets_dtype + "_"]()
|
146 |
+
|
147 |
+
timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
|
148 |
+
if timestamp_matches:
|
149 |
+
timestamp_internals = timestamp_matches.group(1)
|
150 |
+
internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
|
151 |
+
if timestamp_internals in ["s", "ms", "us", "ns"]:
|
152 |
+
return pa.timestamp(timestamp_internals)
|
153 |
+
elif internals_matches:
|
154 |
+
return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
|
155 |
+
else:
|
156 |
+
raise ValueError(
|
157 |
+
_dtype_error_msg(
|
158 |
+
datasets_dtype,
|
159 |
+
"timestamp",
|
160 |
+
examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
|
161 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
|
162 |
+
)
|
163 |
+
)
|
164 |
+
|
165 |
+
duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
|
166 |
+
if duration_matches:
|
167 |
+
duration_internals = duration_matches.group(1)
|
168 |
+
if duration_internals in ["s", "ms", "us", "ns"]:
|
169 |
+
return pa.duration(duration_internals)
|
170 |
+
else:
|
171 |
+
raise ValueError(
|
172 |
+
_dtype_error_msg(
|
173 |
+
datasets_dtype,
|
174 |
+
"duration",
|
175 |
+
examples=["duration[s]", "duration[us]"],
|
176 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
|
177 |
+
)
|
178 |
+
)
|
179 |
+
|
180 |
+
time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
|
181 |
+
if time_matches:
|
182 |
+
time_internals_bits = time_matches.group(1)
|
183 |
+
if time_internals_bits == "32":
|
184 |
+
time_internals_unit = time_matches.group(2)
|
185 |
+
if time_internals_unit in ["s", "ms"]:
|
186 |
+
return pa.time32(time_internals_unit)
|
187 |
+
else:
|
188 |
+
raise ValueError(
|
189 |
+
f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
|
190 |
+
)
|
191 |
+
elif time_internals_bits == "64":
|
192 |
+
time_internals_unit = time_matches.group(2)
|
193 |
+
if time_internals_unit in ["us", "ns"]:
|
194 |
+
return pa.time64(time_internals_unit)
|
195 |
+
else:
|
196 |
+
raise ValueError(
|
197 |
+
f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
|
198 |
+
)
|
199 |
+
else:
|
200 |
+
raise ValueError(
|
201 |
+
_dtype_error_msg(
|
202 |
+
datasets_dtype,
|
203 |
+
"time",
|
204 |
+
examples=["time32[s]", "time64[us]"],
|
205 |
+
urls=[
|
206 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
|
207 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
|
208 |
+
],
|
209 |
+
)
|
210 |
+
)
|
211 |
+
|
212 |
+
decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
|
213 |
+
if decimal_matches:
|
214 |
+
decimal_internals_bits = decimal_matches.group(1)
|
215 |
+
if decimal_internals_bits == "128":
|
216 |
+
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
|
217 |
+
if decimal_internals_precision_and_scale:
|
218 |
+
precision = decimal_internals_precision_and_scale.group(1)
|
219 |
+
scale = decimal_internals_precision_and_scale.group(2)
|
220 |
+
return pa.decimal128(int(precision), int(scale))
|
221 |
+
else:
|
222 |
+
raise ValueError(
|
223 |
+
_dtype_error_msg(
|
224 |
+
datasets_dtype,
|
225 |
+
"decimal128",
|
226 |
+
examples=["decimal128(10, 2)", "decimal128(4, -2)"],
|
227 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
|
228 |
+
)
|
229 |
+
)
|
230 |
+
elif decimal_internals_bits == "256":
|
231 |
+
decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
|
232 |
+
if decimal_internals_precision_and_scale:
|
233 |
+
precision = decimal_internals_precision_and_scale.group(1)
|
234 |
+
scale = decimal_internals_precision_and_scale.group(2)
|
235 |
+
return pa.decimal256(int(precision), int(scale))
|
236 |
+
else:
|
237 |
+
raise ValueError(
|
238 |
+
_dtype_error_msg(
|
239 |
+
datasets_dtype,
|
240 |
+
"decimal256",
|
241 |
+
examples=["decimal256(30, 2)", "decimal256(38, -4)"],
|
242 |
+
urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
|
243 |
+
)
|
244 |
+
)
|
245 |
+
else:
|
246 |
+
raise ValueError(
|
247 |
+
_dtype_error_msg(
|
248 |
+
datasets_dtype,
|
249 |
+
"decimal",
|
250 |
+
examples=["decimal128(12, 3)", "decimal256(40, 6)"],
|
251 |
+
urls=[
|
252 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
|
253 |
+
"https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
|
254 |
+
],
|
255 |
+
)
|
256 |
+
)
|
257 |
+
|
258 |
+
raise ValueError(
|
259 |
+
f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
|
260 |
+
f"Please make sure to use a correct data type, see: "
|
261 |
+
f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
|
262 |
+
)
|
263 |
+
|
264 |
+
|
265 |
+
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
|
266 |
+
"""
|
267 |
+
Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
|
268 |
+
It works recursively.
|
269 |
+
|
270 |
+
If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
|
271 |
+
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
|
272 |
+
This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.
|
273 |
+
|
274 |
+
Args:
|
275 |
+
obj: the object (nested struct) to cast.
|
276 |
+
only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
|
277 |
+
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
|
278 |
+
Indeed Arrow only support converting 1-dimensional array values.
|
279 |
+
optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
|
280 |
+
and if it doesn't, not checking the rest of the list elements.
|
281 |
+
|
282 |
+
Returns:
|
283 |
+
casted_obj: the casted object
|
284 |
+
has_changed (bool): True if the object has been changed, False if it is identical
|
285 |
+
"""
|
286 |
+
|
287 |
+
if config.TF_AVAILABLE and "tensorflow" in sys.modules:
|
288 |
+
import tensorflow as tf
|
289 |
+
|
290 |
+
if config.TORCH_AVAILABLE and "torch" in sys.modules:
|
291 |
+
import torch
|
292 |
+
|
293 |
+
if config.JAX_AVAILABLE and "jax" in sys.modules:
|
294 |
+
import jax.numpy as jnp
|
295 |
+
|
296 |
+
if config.PIL_AVAILABLE and "PIL" in sys.modules:
|
297 |
+
import PIL.Image
|
298 |
+
|
299 |
+
if isinstance(obj, np.ndarray):
|
300 |
+
if obj.ndim == 0:
|
301 |
+
return obj[()], True
|
302 |
+
elif not only_1d_for_numpy or obj.ndim == 1:
|
303 |
+
return obj, False
|
304 |
+
else:
|
305 |
+
return (
|
306 |
+
[
|
307 |
+
_cast_to_python_objects(
|
308 |
+
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
309 |
+
)[0]
|
310 |
+
for x in obj
|
311 |
+
],
|
312 |
+
True,
|
313 |
+
)
|
314 |
+
elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
|
315 |
+
if obj.ndim == 0:
|
316 |
+
return obj.detach().cpu().numpy()[()], True
|
317 |
+
elif not only_1d_for_numpy or obj.ndim == 1:
|
318 |
+
return obj.detach().cpu().numpy(), True
|
319 |
+
else:
|
320 |
+
return (
|
321 |
+
[
|
322 |
+
_cast_to_python_objects(
|
323 |
+
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
324 |
+
)[0]
|
325 |
+
for x in obj.detach().cpu().numpy()
|
326 |
+
],
|
327 |
+
True,
|
328 |
+
)
|
329 |
+
elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
|
330 |
+
if obj.ndim == 0:
|
331 |
+
return obj.numpy()[()], True
|
332 |
+
elif not only_1d_for_numpy or obj.ndim == 1:
|
333 |
+
return obj.numpy(), True
|
334 |
+
else:
|
335 |
+
return (
|
336 |
+
[
|
337 |
+
_cast_to_python_objects(
|
338 |
+
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
339 |
+
)[0]
|
340 |
+
for x in obj.numpy()
|
341 |
+
],
|
342 |
+
True,
|
343 |
+
)
|
344 |
+
elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
|
345 |
+
if obj.ndim == 0:
|
346 |
+
return np.asarray(obj)[()], True
|
347 |
+
elif not only_1d_for_numpy or obj.ndim == 1:
|
348 |
+
return np.asarray(obj), True
|
349 |
+
else:
|
350 |
+
return (
|
351 |
+
[
|
352 |
+
_cast_to_python_objects(
|
353 |
+
x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
354 |
+
)[0]
|
355 |
+
for x in np.asarray(obj)
|
356 |
+
],
|
357 |
+
True,
|
358 |
+
)
|
359 |
+
elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
|
360 |
+
return encode_pil_image(obj), True
|
361 |
+
elif isinstance(obj, pd.Series):
|
362 |
+
return (
|
363 |
+
_cast_to_python_objects(
|
364 |
+
obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
365 |
+
)[0],
|
366 |
+
True,
|
367 |
+
)
|
368 |
+
elif isinstance(obj, pd.DataFrame):
|
369 |
+
return (
|
370 |
+
{
|
371 |
+
key: _cast_to_python_objects(
|
372 |
+
value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
373 |
+
)[0]
|
374 |
+
for key, value in obj.to_dict("series").items()
|
375 |
+
},
|
376 |
+
True,
|
377 |
+
)
|
378 |
+
elif isinstance(obj, pd.Timestamp):
|
379 |
+
return obj.to_pydatetime(), True
|
380 |
+
elif isinstance(obj, pd.Timedelta):
|
381 |
+
return obj.to_pytimedelta(), True
|
382 |
+
elif isinstance(obj, Mapping):
|
383 |
+
has_changed = not isinstance(obj, dict)
|
384 |
+
output = {}
|
385 |
+
for k, v in obj.items():
|
386 |
+
casted_v, has_changed_v = _cast_to_python_objects(
|
387 |
+
v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
388 |
+
)
|
389 |
+
has_changed |= has_changed_v
|
390 |
+
output[k] = casted_v
|
391 |
+
return output if has_changed else obj, has_changed
|
392 |
+
elif hasattr(obj, "__array__"):
|
393 |
+
return (
|
394 |
+
_cast_to_python_objects(
|
395 |
+
obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
396 |
+
)[0],
|
397 |
+
True,
|
398 |
+
)
|
399 |
+
elif isinstance(obj, (list, tuple)):
|
400 |
+
if len(obj) > 0:
|
401 |
+
for first_elmt in obj:
|
402 |
+
if _check_non_null_non_empty_recursive(first_elmt):
|
403 |
+
break
|
404 |
+
casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
|
405 |
+
first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
406 |
+
)
|
407 |
+
if has_changed_first_elmt or not optimize_list_casting:
|
408 |
+
return (
|
409 |
+
[
|
410 |
+
_cast_to_python_objects(
|
411 |
+
elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
412 |
+
)[0]
|
413 |
+
for elmt in obj
|
414 |
+
],
|
415 |
+
True,
|
416 |
+
)
|
417 |
+
else:
|
418 |
+
if isinstance(obj, (list, tuple)):
|
419 |
+
return obj, False
|
420 |
+
else:
|
421 |
+
return list(obj), True
|
422 |
+
else:
|
423 |
+
return obj, False
|
424 |
+
else:
|
425 |
+
return obj, False
|
426 |
+
|
427 |
+
|
428 |
+
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
|
429 |
+
"""
|
430 |
+
Cast numpy/pytorch/tensorflow/pandas objects to python lists.
|
431 |
+
It works recursively.
|
432 |
+
|
433 |
+
If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
|
434 |
+
If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
|
435 |
+
This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.
|
436 |
+
|
437 |
+
Args:
|
438 |
+
obj: the object (nested struct) to cast
|
439 |
+
only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
|
440 |
+
nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
|
441 |
+
Indeed Arrow only support converting 1-dimensional array values.
|
442 |
+
optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
|
443 |
+
and if it doesn't, not checking the rest of the list elements.
|
444 |
+
|
445 |
+
Returns:
|
446 |
+
casted_obj: the casted object
|
447 |
+
"""
|
448 |
+
return _cast_to_python_objects(
|
449 |
+
obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
|
450 |
+
)[0]
|
451 |
+
|
452 |
+
|
453 |
+
@dataclass
|
454 |
+
class Value:
|
455 |
+
"""
|
456 |
+
The `Value` dtypes are as follows:
|
457 |
+
|
458 |
+
- `null`
|
459 |
+
- `bool`
|
460 |
+
- `int8`
|
461 |
+
- `int16`
|
462 |
+
- `int32`
|
463 |
+
- `int64`
|
464 |
+
- `uint8`
|
465 |
+
- `uint16`
|
466 |
+
- `uint32`
|
467 |
+
- `uint64`
|
468 |
+
- `float16`
|
469 |
+
- `float32` (alias float)
|
470 |
+
- `float64` (alias double)
|
471 |
+
- `time32[(s|ms)]`
|
472 |
+
- `time64[(us|ns)]`
|
473 |
+
- `timestamp[(s|ms|us|ns)]`
|
474 |
+
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
|
475 |
+
- `date32`
|
476 |
+
- `date64`
|
477 |
+
- `duration[(s|ms|us|ns)]`
|
478 |
+
- `decimal128(precision, scale)`
|
479 |
+
- `decimal256(precision, scale)`
|
480 |
+
- `binary`
|
481 |
+
- `large_binary`
|
482 |
+
- `string`
|
483 |
+
- `large_string`
|
484 |
+
|
485 |
+
Example:
|
486 |
+
|
487 |
+
```py
|
488 |
+
>>> from datasets import Features
|
489 |
+
>>> features = Features({'stars': Value(dtype='int32')})
|
490 |
+
>>> features
|
491 |
+
{'stars': Value(dtype='int32', id=None)}
|
492 |
+
```
|
493 |
+
"""
|
494 |
+
|
495 |
+
dtype: str
|
496 |
+
id: Optional[str] = None
|
497 |
+
# Automatically constructed
|
498 |
+
pa_type: ClassVar[Any] = None
|
499 |
+
_type: str = field(default="Value", init=False, repr=False)
|
500 |
+
|
501 |
+
def __post_init__(self):
|
502 |
+
if self.dtype == "double": # fix inferred type
|
503 |
+
self.dtype = "float64"
|
504 |
+
if self.dtype == "float": # fix inferred type
|
505 |
+
self.dtype = "float32"
|
506 |
+
self.pa_type = string_to_arrow(self.dtype)
|
507 |
+
|
508 |
+
def __call__(self):
|
509 |
+
return self.pa_type
|
510 |
+
|
511 |
+
def encode_example(self, value):
|
512 |
+
if pa.types.is_boolean(self.pa_type):
|
513 |
+
return bool(value)
|
514 |
+
elif pa.types.is_integer(self.pa_type):
|
515 |
+
return int(value)
|
516 |
+
elif pa.types.is_floating(self.pa_type):
|
517 |
+
return float(value)
|
518 |
+
elif pa.types.is_string(self.pa_type):
|
519 |
+
return str(value)
|
520 |
+
else:
|
521 |
+
return value
|
522 |
+
|
523 |
+
|
524 |
+
class _ArrayXD:
|
525 |
+
def __post_init__(self):
|
526 |
+
self.shape = tuple(self.shape)
|
527 |
+
|
528 |
+
def __call__(self):
|
529 |
+
pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
|
530 |
+
return pa_type
|
531 |
+
|
532 |
+
def encode_example(self, value):
|
533 |
+
return value
|
534 |
+
|
535 |
+
|
536 |
+
@dataclass
|
537 |
+
class Array2D(_ArrayXD):
|
538 |
+
"""Create a two-dimensional array.
|
539 |
+
|
540 |
+
Args:
|
541 |
+
shape (`tuple`):
|
542 |
+
The size of each dimension.
|
543 |
+
dtype (`str`):
|
544 |
+
The value of the data type.
|
545 |
+
|
546 |
+
Example:
|
547 |
+
|
548 |
+
```py
|
549 |
+
>>> from datasets import Features
|
550 |
+
>>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
|
551 |
+
```
|
552 |
+
"""
|
553 |
+
|
554 |
+
shape: tuple
|
555 |
+
dtype: str
|
556 |
+
id: Optional[str] = None
|
557 |
+
# Automatically constructed
|
558 |
+
_type: str = field(default="Array2D", init=False, repr=False)
|
559 |
+
|
560 |
+
|
561 |
+
@dataclass
|
562 |
+
class Array3D(_ArrayXD):
|
563 |
+
"""Create a three-dimensional array.
|
564 |
+
|
565 |
+
Args:
|
566 |
+
shape (`tuple`):
|
567 |
+
The size of each dimension.
|
568 |
+
dtype (`str`):
|
569 |
+
The value of the data type.
|
570 |
+
|
571 |
+
Example:
|
572 |
+
|
573 |
+
```py
|
574 |
+
>>> from datasets import Features
|
575 |
+
>>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
|
576 |
+
```
|
577 |
+
"""
|
578 |
+
|
579 |
+
shape: tuple
|
580 |
+
dtype: str
|
581 |
+
id: Optional[str] = None
|
582 |
+
# Automatically constructed
|
583 |
+
_type: str = field(default="Array3D", init=False, repr=False)
|
584 |
+
|
585 |
+
|
586 |
+
@dataclass
|
587 |
+
class Array4D(_ArrayXD):
|
588 |
+
"""Create a four-dimensional array.
|
589 |
+
|
590 |
+
Args:
|
591 |
+
shape (`tuple`):
|
592 |
+
The size of each dimension.
|
593 |
+
dtype (`str`):
|
594 |
+
The value of the data type.
|
595 |
+
|
596 |
+
Example:
|
597 |
+
|
598 |
+
```py
|
599 |
+
>>> from datasets import Features
|
600 |
+
>>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
|
601 |
+
```
|
602 |
+
"""
|
603 |
+
|
604 |
+
shape: tuple
|
605 |
+
dtype: str
|
606 |
+
id: Optional[str] = None
|
607 |
+
# Automatically constructed
|
608 |
+
_type: str = field(default="Array4D", init=False, repr=False)
|
609 |
+
|
610 |
+
|
611 |
+
@dataclass
|
612 |
+
class Array5D(_ArrayXD):
|
613 |
+
"""Create a five-dimensional array.
|
614 |
+
|
615 |
+
Args:
|
616 |
+
shape (`tuple`):
|
617 |
+
The size of each dimension.
|
618 |
+
dtype (`str`):
|
619 |
+
The value of the data type.
|
620 |
+
|
621 |
+
Example:
|
622 |
+
|
623 |
+
```py
|
624 |
+
>>> from datasets import Features
|
625 |
+
>>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
|
626 |
+
```
|
627 |
+
"""
|
628 |
+
|
629 |
+
shape: tuple
|
630 |
+
dtype: str
|
631 |
+
id: Optional[str] = None
|
632 |
+
# Automatically constructed
|
633 |
+
_type: str = field(default="Array5D", init=False, repr=False)
|
634 |
+
|
635 |
+
|
636 |
+
class _ArrayXDExtensionType(pa.ExtensionType):
    ndims: Optional[int] = None

    def __init__(self, shape: tuple, dtype: str):
        if self.ndims is None or self.ndims <= 1:
            raise ValueError("You must instantiate an array type with a value for dim that is > 1")
        if len(shape) != self.ndims:
            raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
        for dim in range(1, self.ndims):
            if shape[dim] is None:
                raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
        self.shape = tuple(shape)
        self.value_type = dtype
        self.storage_dtype = self._generate_dtype(self.value_type)
        pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")

    def __arrow_ext_serialize__(self):
        return json.dumps((self.shape, self.value_type)).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        args = json.loads(serialized)
        return cls(*args)

    # This was added to pa.ExtensionType in pyarrow >= 13.0.0
    def __reduce__(self):
        return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())

    def __hash__(self):
        return hash((self.__class__, self.shape, self.value_type))

    def __arrow_ext_class__(self):
        return ArrayExtensionArray

    def _generate_dtype(self, dtype):
        dtype = string_to_arrow(dtype)
        for d in reversed(self.shape):
            dtype = pa.list_(dtype)
            # Don't specify the size of the list, since fixed length list arrays have issues
            # being validated after slicing in pyarrow 0.17.1
        return dtype

    def to_pandas_dtype(self):
        return PandasArrayExtensionDtype(self.value_type)

class Array2DExtensionType(_ArrayXDExtensionType):
    ndims = 2


class Array3DExtensionType(_ArrayXDExtensionType):
    ndims = 3


class Array4DExtensionType(_ArrayXDExtensionType):
    ndims = 4


class Array5DExtensionType(_ArrayXDExtensionType):
    ndims = 5


# Register the extension types for deserialization
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))

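As a sanity check on `_generate_dtype`, the sketch below (an assumption-laden snippet, importing from this module's path) shows that the storage layout of an ArrayXD extension type is plain nested lists, one `pa.list_` level per dimension.

```py
import pyarrow as pa
from datasets.features.features import Array2DExtensionType

ext_type = Array2DExtensionType(shape=(2, 3), dtype="int32")
# Two dimensions -> two levels of list_ wrapping the leaf value type.
assert ext_type.storage_dtype == pa.list_(pa.list_(pa.int32()))
```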
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
    """
    When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
    This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.

    # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
    # primitive types are types for which the physical representation in arrow and in numpy
    # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
    # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
    # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
    """

    def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
        if pa.types.is_list(pa_type):
            return _unnest_pa_type(pa_type.value_type)
        return pa_type

    if unnest:
        pa_type = _unnest_pa_type(pa_type)
    return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))

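The rule above can be checked directly against pyarrow: primitive numeric arrays convert to NumPy without copying, whereas booleans (and temporal types) need a copy. A small sketch:

```py
import pyarrow as pa

pa.array([1, 2, 3]).to_numpy(zero_copy_only=True)       # works: int64 is primitive
pa.array([True, False]).to_numpy(zero_copy_only=False)  # booleans require a copy
```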
class ArrayExtensionArray(pa.ExtensionArray):
    def __array__(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        return self.to_numpy(zero_copy_only=zero_copy_only)

    def __getitem__(self, i):
        return self.storage[i]

    def to_numpy(self, zero_copy_only=True):
        storage: pa.ListArray = self.storage
        null_mask = storage.is_null().to_numpy(zero_copy_only=False)

        if self.type.shape[0] is not None:
            size = 1
            null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))

            for i in range(self.type.ndims):
                size *= self.type.shape[i]
                storage = storage.flatten()
            numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
            numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)

            if len(null_indices):
                numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)

        else:
            shape = self.type.shape
            ndims = self.type.ndims
            arrays = []
            first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
            for i, is_null in enumerate(null_mask):
                if is_null:
                    arrays.append(np.nan)
                else:
                    storage_el = storage[i : i + 1]
                    first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
                    # flatten storage
                    for _ in range(ndims):
                        storage_el = storage_el.flatten()

                    numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
                    arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))

            if len(np.unique(np.diff(first_dim_offsets))) > 1:
                # ragged
                numpy_arr = np.empty(len(arrays), dtype=object)
                numpy_arr[:] = arrays
            else:
                numpy_arr = np.array(arrays)

        return numpy_arr

    def to_pylist(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
        if self.type.shape[0] is None and numpy_arr.dtype == object:
            return [arr.tolist() for arr in numpy_arr.tolist()]
        else:
            return numpy_arr.tolist()

class PandasArrayExtensionDtype(PandasExtensionDtype):
    _metadata = "value_type"

    def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
        self._value_type = value_type

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        if isinstance(array, pa.ChunkedArray):
            array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
        zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
        numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
        return PandasArrayExtensionArray(numpy_arr)

    @classmethod
    def construct_array_type(cls):
        return PandasArrayExtensionArray

    @property
    def type(self) -> type:
        return np.ndarray

    @property
    def kind(self) -> str:
        return "O"

    @property
    def name(self) -> str:
        return f"array[{self.value_type}]"

    @property
    def value_type(self) -> np.dtype:
        return self._value_type

class PandasArrayExtensionArray(PandasExtensionArray):
    def __init__(self, data: np.ndarray, copy: bool = False):
        self._data = data if not copy else np.array(data)
        self._dtype = PandasArrayExtensionDtype(data.dtype)

    def __array__(self, dtype=None):
        """
        Convert to NumPy Array.
        Note that Pandas expects a 1D array when dtype is set to object.
        But for other dtypes, the returned shape is the same as the one of ``data``.

        More info about pandas 1D requirement for PandasExtensionArray here:
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray

        """
        if dtype == object:
            out = np.empty(len(self._data), dtype=object)
            for i in range(len(self._data)):
                out[i] = self._data[i]
            return out
        if dtype is None:
            return self._data
        else:
            return self._data.astype(dtype)

    def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
        return PandasArrayExtensionArray(self._data, copy=True)

    @classmethod
    def _from_sequence(
        cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
    ) -> "PandasArrayExtensionArray":
        if len(scalars) > 1 and all(
            isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
        ):
            data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
        else:
            data = np.empty(len(scalars), dtype=object)
            data[:] = scalars
        return cls(data, copy=copy)

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
        if len(to_concat) > 1 and all(
            va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
            for va in to_concat
        ):
            data = np.vstack([va._data for va in to_concat])
        else:
            data = np.empty(len(to_concat), dtype=object)
            data[:] = [va._data for va in to_concat]
        return cls(data, copy=False)

    @property
    def dtype(self) -> PandasArrayExtensionDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self) -> np.ndarray:
        return np.array([pd.isna(arr).any() for arr in self._data])

    def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
        raise NotImplementedError()

    def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
        if isinstance(item, int):
            return self._data[item]
        return PandasArrayExtensionArray(self._data[item], copy=False)

    def take(
        self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
    ) -> "PandasArrayExtensionArray":
        indices: np.ndarray = np.asarray(indices, dtype=int)
        if allow_fill:
            fill_value = (
                self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
            )
            mask = indices == -1
            if (indices < -1).any():
                raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
            elif len(self) > 0:
                pass
            elif not np.all(mask):
                raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
            else:
                data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
                return PandasArrayExtensionArray(data, copy=False)
        took = self._data.take(indices, axis=0)
        if allow_fill and mask.any():
            took[mask] = [fill_value] * np.sum(mask)
        return PandasArrayExtensionArray(took, copy=False)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other) -> np.ndarray:
        if not isinstance(other, PandasArrayExtensionArray):
            raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
        return (self._data == other._data).all()

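Putting the two pandas extension classes together: when a dataset with an ArrayXD column is viewed as pandas, each cell should come back as a NumPy array backed by `PandasArrayExtensionArray`. A sketch, with the behavior assumed for recent library versions:

```py
import numpy as np
from datasets import Array2D, Dataset, Features

features = Features({"m": Array2D(shape=(2, 2), dtype="float32")})
ds = Dataset.from_dict({"m": [np.eye(2, dtype="float32")]}, features=features)
df = ds.with_format("pandas")[:]
print(df["m"].dtype)  # expected: array[float32] (PandasArrayExtensionDtype)
print(df["m"][0])     # a 2x2 NumPy array
```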
def pandas_types_mapper(dtype):
    if isinstance(dtype, _ArrayXDExtensionType):
        return PandasArrayExtensionDtype(dtype.value_type)

@dataclass
|
932 |
+
class ClassLabel:
|
933 |
+
"""Feature type for integer class labels.
|
934 |
+
|
935 |
+
There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
|
936 |
+
|
937 |
+
* `num_classes`: Create 0 to (num_classes-1) labels.
|
938 |
+
* `names`: List of label strings.
|
939 |
+
* `names_file`: File containing the list of labels.
|
940 |
+
|
941 |
+
Under the hood the labels are stored as integers.
|
942 |
+
You can use negative integers to represent unknown/missing labels.
|
943 |
+
|
944 |
+
Args:
|
945 |
+
num_classes (`int`, *optional*):
|
946 |
+
Number of classes. All labels must be < `num_classes`.
|
947 |
+
names (`list` of `str`, *optional*):
|
948 |
+
String names for the integer classes.
|
949 |
+
The order in which the names are provided is kept.
|
950 |
+
names_file (`str`, *optional*):
|
951 |
+
Path to a file with names for the integer classes, one per line.
|
952 |
+
|
953 |
+
Example:
|
954 |
+
|
955 |
+
```py
|
956 |
+
>>> from datasets import Features
|
957 |
+
>>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
|
958 |
+
>>> features
|
959 |
+
{'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
|
960 |
+
```
|
961 |
+
"""
|
962 |
+
|
963 |
+
num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
|
964 |
+
names: List[str] = None
|
965 |
+
names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
|
966 |
+
id: Optional[str] = None
|
967 |
+
# Automatically constructed
|
968 |
+
dtype: ClassVar[str] = "int64"
|
969 |
+
pa_type: ClassVar[Any] = pa.int64()
|
970 |
+
_str2int: ClassVar[Dict[str, int]] = None
|
971 |
+
_int2str: ClassVar[Dict[int, int]] = None
|
972 |
+
_type: str = field(default="ClassLabel", init=False, repr=False)
|
973 |
+
|
974 |
+
def __post_init__(self, num_classes, names_file):
|
975 |
+
self.num_classes = num_classes
|
976 |
+
self.names_file = names_file
|
977 |
+
if self.names_file is not None and self.names is not None:
|
978 |
+
raise ValueError("Please provide either names or names_file but not both.")
|
979 |
+
# Set self.names
|
980 |
+
if self.names is None:
|
981 |
+
if self.names_file is not None:
|
982 |
+
self.names = self._load_names_from_file(self.names_file)
|
983 |
+
elif self.num_classes is not None:
|
984 |
+
self.names = [str(i) for i in range(self.num_classes)]
|
985 |
+
else:
|
986 |
+
raise ValueError("Please provide either num_classes, names or names_file.")
|
987 |
+
elif not isinstance(self.names, SequenceABC):
|
988 |
+
raise TypeError(f"Please provide names as a list, is {type(self.names)}")
|
989 |
+
# Set self.num_classes
|
990 |
+
if self.num_classes is None:
|
991 |
+
self.num_classes = len(self.names)
|
992 |
+
elif self.num_classes != len(self.names):
|
993 |
+
raise ValueError(
|
994 |
+
"ClassLabel number of names do not match the defined num_classes. "
|
995 |
+
f"Got {len(self.names)} names VS {self.num_classes} num_classes"
|
996 |
+
)
|
997 |
+
# Prepare mappings
|
998 |
+
self._int2str = [str(name) for name in self.names]
|
999 |
+
self._str2int = {name: i for i, name in enumerate(self._int2str)}
|
1000 |
+
if len(self._int2str) != len(self._str2int):
|
1001 |
+
raise ValueError("Some label names are duplicated. Each label name should be unique.")
|
1002 |
+
|
1003 |
+
def __call__(self):
|
1004 |
+
return self.pa_type
|
1005 |
+
|
1006 |
+
def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
|
1007 |
+
"""Conversion class name `string` => `integer`.
|
1008 |
+
|
1009 |
+
Example:
|
1010 |
+
|
1011 |
+
```py
|
1012 |
+
>>> from datasets import load_dataset
|
1013 |
+
>>> ds = load_dataset("rotten_tomatoes", split="train")
|
1014 |
+
>>> ds.features["label"].str2int('neg')
|
1015 |
+
0
|
1016 |
+
```
|
1017 |
+
"""
|
1018 |
+
if not isinstance(values, str) and not isinstance(values, Iterable):
|
1019 |
+
raise ValueError(
|
1020 |
+
f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
|
1021 |
+
)
|
1022 |
+
return_list = True
|
1023 |
+
if isinstance(values, str):
|
1024 |
+
values = [values]
|
1025 |
+
return_list = False
|
1026 |
+
|
1027 |
+
output = [self._strval2int(value) for value in values]
|
1028 |
+
return output if return_list else output[0]
|
1029 |
+
|
1030 |
+
def _strval2int(self, value: str) -> int:
|
1031 |
+
failed_parse = False
|
1032 |
+
value = str(value)
|
1033 |
+
# first attempt - raw string value
|
1034 |
+
int_value = self._str2int.get(value)
|
1035 |
+
if int_value is None:
|
1036 |
+
# second attempt - strip whitespace
|
1037 |
+
int_value = self._str2int.get(value.strip())
|
1038 |
+
if int_value is None:
|
1039 |
+
# third attempt - convert str to int
|
1040 |
+
try:
|
1041 |
+
int_value = int(value)
|
1042 |
+
except ValueError:
|
1043 |
+
failed_parse = True
|
1044 |
+
else:
|
1045 |
+
if int_value < -1 or int_value >= self.num_classes:
|
1046 |
+
failed_parse = True
|
1047 |
+
if failed_parse:
|
1048 |
+
raise ValueError(f"Invalid string class label {value}")
|
1049 |
+
return int_value
|
1050 |
+
|
1051 |
+
def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
|
1052 |
+
"""Conversion `integer` => class name `string`.
|
1053 |
+
|
1054 |
+
Regarding unknown/missing labels: passing negative integers raises `ValueError`.
|
1055 |
+
|
1056 |
+
Example:
|
1057 |
+
|
1058 |
+
```py
|
1059 |
+
>>> from datasets import load_dataset
|
1060 |
+
>>> ds = load_dataset("rotten_tomatoes", split="train")
|
1061 |
+
>>> ds.features["label"].int2str(0)
|
1062 |
+
'neg'
|
1063 |
+
```
|
1064 |
+
"""
|
1065 |
+
if not isinstance(values, int) and not isinstance(values, Iterable):
|
1066 |
+
raise ValueError(
|
1067 |
+
f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
|
1068 |
+
)
|
1069 |
+
return_list = True
|
1070 |
+
if isinstance(values, int):
|
1071 |
+
values = [values]
|
1072 |
+
return_list = False
|
1073 |
+
|
1074 |
+
for v in values:
|
1075 |
+
if not 0 <= v < self.num_classes:
|
1076 |
+
raise ValueError(f"Invalid integer class label {v:d}")
|
1077 |
+
|
1078 |
+
output = [self._int2str[int(v)] for v in values]
|
1079 |
+
return output if return_list else output[0]
|
1080 |
+
|
1081 |
+
def encode_example(self, example_data):
|
1082 |
+
if self.num_classes is None:
|
1083 |
+
raise ValueError(
|
1084 |
+
"Trying to use ClassLabel feature with undefined number of class. "
|
1085 |
+
"Please set ClassLabel.names or num_classes."
|
1086 |
+
)
|
1087 |
+
|
1088 |
+
# If a string is given, convert to associated integer
|
1089 |
+
if isinstance(example_data, str):
|
1090 |
+
example_data = self.str2int(example_data)
|
1091 |
+
|
1092 |
+
# Allowing -1 to mean no label.
|
1093 |
+
if not -1 <= example_data < self.num_classes:
|
1094 |
+
raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
|
1095 |
+
return example_data
|
1096 |
+
|
1097 |
+
def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
|
1098 |
+
"""Cast an Arrow array to the `ClassLabel` arrow storage type.
|
1099 |
+
The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
|
1100 |
+
|
1101 |
+
- `pa.string()`
|
1102 |
+
- `pa.int()`
|
1103 |
+
|
1104 |
+
Args:
|
1105 |
+
storage (`Union[pa.StringArray, pa.IntegerArray]`):
|
1106 |
+
PyArrow array to cast.
|
1107 |
+
|
1108 |
+
Returns:
|
1109 |
+
`pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
|
1110 |
+
"""
|
1111 |
+
if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
|
1112 |
+
min_max = pc.min_max(storage).as_py()
|
1113 |
+
if min_max["max"] is not None and min_max["max"] >= self.num_classes:
|
1114 |
+
raise ValueError(
|
1115 |
+
f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
|
1116 |
+
)
|
1117 |
+
elif isinstance(storage, pa.StringArray):
|
1118 |
+
storage = pa.array(
|
1119 |
+
[self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
|
1120 |
+
)
|
1121 |
+
return array_cast(storage, self.pa_type)
|
1122 |
+
|
1123 |
+
@staticmethod
|
1124 |
+
def _load_names_from_file(names_filepath):
|
1125 |
+
with open(names_filepath, encoding="utf-8") as f:
|
1126 |
+
return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
|
1127 |
+
|
1128 |
+
|
1129 |
+
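A short sketch exercising the `ClassLabel` mappings defined above, without loading any dataset:

```py
from datasets import ClassLabel

label = ClassLabel(names=["neg", "pos"])
assert label.str2int("pos") == 1
assert label.int2str(1) == "pos"
assert label.encode_example("neg") == 0  # strings are converted to integer ids
assert label.encode_example(-1) == -1    # -1 is accepted to mean "no label"
```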
@dataclass
|
1130 |
+
class Sequence:
|
1131 |
+
"""Construct a list of feature from a single type or a dict of types.
|
1132 |
+
Mostly here for compatibility with tfds.
|
1133 |
+
|
1134 |
+
Args:
|
1135 |
+
feature:
|
1136 |
+
A list of features of a single type or a dictionary of types.
|
1137 |
+
length (`int`):
|
1138 |
+
Length of the sequence.
|
1139 |
+
|
1140 |
+
Example:
|
1141 |
+
|
1142 |
+
```py
|
1143 |
+
>>> from datasets import Features, Sequence, Value, ClassLabel
|
1144 |
+
>>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
|
1145 |
+
>>> features
|
1146 |
+
{'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
|
1147 |
+
```
|
1148 |
+
"""
|
1149 |
+
|
1150 |
+
feature: Any
|
1151 |
+
length: int = -1
|
1152 |
+
id: Optional[str] = None
|
1153 |
+
# Automatically constructed
|
1154 |
+
dtype: ClassVar[str] = "list"
|
1155 |
+
pa_type: ClassVar[Any] = None
|
1156 |
+
_type: str = field(default="Sequence", init=False, repr=False)
|
1157 |
+
|
1158 |
+
|
1159 |
+
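As the docstring notes, a `Sequence` of a dict is flattened to a dict of lists; in the Arrow schema this shows up as a struct of lists. A minimal sketch:

```py
from datasets import Features, Sequence, Value

features = Features({"tokens": Sequence({"text": Value("string"), "start": Value("int32")})})
print(features.arrow_schema.field("tokens").type)
# struct<text: list<item: string>, start: list<item: int32>>
```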
FeatureType = Union[
|
1160 |
+
dict,
|
1161 |
+
list,
|
1162 |
+
tuple,
|
1163 |
+
Value,
|
1164 |
+
ClassLabel,
|
1165 |
+
Translation,
|
1166 |
+
TranslationVariableLanguages,
|
1167 |
+
Sequence,
|
1168 |
+
Array2D,
|
1169 |
+
Array3D,
|
1170 |
+
Array4D,
|
1171 |
+
Array5D,
|
1172 |
+
Audio,
|
1173 |
+
Image,
|
1174 |
+
]
|
1175 |
+
|
1176 |
+
|
1177 |
+
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
|
1178 |
+
"""
|
1179 |
+
Check if the object is not None.
|
1180 |
+
If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
|
1181 |
+
"""
|
1182 |
+
if obj is None:
|
1183 |
+
return False
|
1184 |
+
elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
|
1185 |
+
if len(obj) > 0:
|
1186 |
+
if schema is None:
|
1187 |
+
pass
|
1188 |
+
elif isinstance(schema, (list, tuple)):
|
1189 |
+
schema = schema[0]
|
1190 |
+
else:
|
1191 |
+
schema = schema.feature
|
1192 |
+
return _check_non_null_non_empty_recursive(obj[0], schema)
|
1193 |
+
else:
|
1194 |
+
return False
|
1195 |
+
else:
|
1196 |
+
return True
|
1197 |
+
|
1198 |
+
|
1199 |
+
def get_nested_type(schema: FeatureType) -> pa.DataType:
|
1200 |
+
"""
|
1201 |
+
get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
|
1202 |
+
generate_from_arrow_type().
|
1203 |
+
|
1204 |
+
It performs double-duty as the implementation of Features.type and handles the conversion of
|
1205 |
+
datasets.Feature->pa.struct
|
1206 |
+
"""
|
1207 |
+
# Nested structures: we allow dict, list/tuples, sequences
|
1208 |
+
if isinstance(schema, Features):
|
1209 |
+
return pa.struct(
|
1210 |
+
{key: get_nested_type(schema[key]) for key in schema}
|
1211 |
+
) # Features is subclass of dict, and dict order is deterministic since Python 3.6
|
1212 |
+
elif isinstance(schema, dict):
|
1213 |
+
return pa.struct(
|
1214 |
+
{key: get_nested_type(schema[key]) for key in schema}
|
1215 |
+
) # however don't sort on struct types since the order matters
|
1216 |
+
elif isinstance(schema, (list, tuple)):
|
1217 |
+
if len(schema) != 1:
|
1218 |
+
raise ValueError("When defining list feature, you should just provide one example of the inner type")
|
1219 |
+
value_type = get_nested_type(schema[0])
|
1220 |
+
return pa.list_(value_type)
|
1221 |
+
elif isinstance(schema, Sequence):
|
1222 |
+
value_type = get_nested_type(schema.feature)
|
1223 |
+
# We allow to reverse list of dict => dict of list for compatibility with tfds
|
1224 |
+
if isinstance(schema.feature, dict):
|
1225 |
+
return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
|
1226 |
+
return pa.list_(value_type, schema.length)
|
1227 |
+
|
1228 |
+
# Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
|
1229 |
+
return schema()
|
1230 |
+
|
1231 |
+
|
1232 |
+
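A quick sketch of `get_nested_type` in action through the public `Features.type` property, showing how nested Python feature definitions map to nested Arrow types:

```py
from datasets import Features, Value

features = Features({"answers": [{"text": Value("string"), "score": Value("float32")}]})
print(features.type)
# struct<answers: list<item: struct<text: string, score: float>>>
```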
def encode_nested_example(schema, obj, level=0):
|
1233 |
+
"""Encode a nested example.
|
1234 |
+
This is used since some features (in particular ClassLabel) have some logic during encoding.
|
1235 |
+
|
1236 |
+
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
|
1237 |
+
If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
|
1238 |
+
"""
|
1239 |
+
# Nested structures: we allow dict, list/tuples, sequences
|
1240 |
+
if isinstance(schema, dict):
|
1241 |
+
if level == 0 and obj is None:
|
1242 |
+
raise ValueError("Got None but expected a dictionary instead")
|
1243 |
+
return (
|
1244 |
+
{k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
|
1245 |
+
if obj is not None
|
1246 |
+
else None
|
1247 |
+
)
|
1248 |
+
|
1249 |
+
elif isinstance(schema, (list, tuple)):
|
1250 |
+
sub_schema = schema[0]
|
1251 |
+
if obj is None:
|
1252 |
+
return None
|
1253 |
+
else:
|
1254 |
+
if len(obj) > 0:
|
1255 |
+
for first_elmt in obj:
|
1256 |
+
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
|
1257 |
+
break
|
1258 |
+
if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
|
1259 |
+
return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
|
1260 |
+
return list(obj)
|
1261 |
+
elif isinstance(schema, Sequence):
|
1262 |
+
if obj is None:
|
1263 |
+
return None
|
1264 |
+
# We allow to reverse list of dict => dict of list for compatibility with tfds
|
1265 |
+
if isinstance(schema.feature, dict):
|
1266 |
+
# dict of list to fill
|
1267 |
+
list_dict = {}
|
1268 |
+
if isinstance(obj, (list, tuple)):
|
1269 |
+
# obj is a list of dict
|
1270 |
+
for k in schema.feature:
|
1271 |
+
list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
|
1272 |
+
return list_dict
|
1273 |
+
else:
|
1274 |
+
# obj is a single dict
|
1275 |
+
for k in schema.feature:
|
1276 |
+
list_dict[k] = (
|
1277 |
+
[encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
|
1278 |
+
if k in obj
|
1279 |
+
else None
|
1280 |
+
)
|
1281 |
+
return list_dict
|
1282 |
+
# schema.feature is not a dict
|
1283 |
+
if isinstance(obj, str): # don't interpret a string as a list
|
1284 |
+
raise ValueError(f"Got a string but expected a list instead: '{obj}'")
|
1285 |
+
else:
|
1286 |
+
if len(obj) > 0:
|
1287 |
+
for first_elmt in obj:
|
1288 |
+
if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
|
1289 |
+
break
|
1290 |
+
# be careful when comparing tensors here
|
1291 |
+
if (
|
1292 |
+
not isinstance(first_elmt, list)
|
1293 |
+
or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
|
1294 |
+
):
|
1295 |
+
return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
|
1296 |
+
return list(obj)
|
1297 |
+
# Object with special encoding:
|
1298 |
+
# ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
|
1299 |
+
elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
|
1300 |
+
return schema.encode_example(obj) if obj is not None else None
|
1301 |
+
# Other objects should be directly convertible to a native Arrow type (like Translation)
|
1302 |
+
return obj
|
1303 |
+
|
1304 |
+
|
1305 |
+
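A sketch of the encoding walk via the public `Features.encode_example`: `ClassLabel` strings nested inside a sequence are converted to integer ids, while plain values pass through unchanged.

```py
from datasets import ClassLabel, Features, Sequence, Value

features = Features({"text": Value("string"), "labels": Sequence(ClassLabel(names=["neg", "pos"]))})
print(features.encode_example({"text": "good movie", "labels": ["pos", "neg"]}))
# {'text': 'good movie', 'labels': [1, 0]}
```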
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
|
1306 |
+
"""Decode a nested example.
|
1307 |
+
This is used since some features (in particular Audio and Image) have some logic during decoding.
|
1308 |
+
|
1309 |
+
To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
|
1310 |
+
If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
|
1311 |
+
"""
|
1312 |
+
# Nested structures: we allow dict, list/tuples, sequences
|
1313 |
+
if isinstance(schema, dict):
|
1314 |
+
return (
|
1315 |
+
{k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
|
1316 |
+
if obj is not None
|
1317 |
+
else None
|
1318 |
+
)
|
1319 |
+
elif isinstance(schema, (list, tuple)):
|
1320 |
+
sub_schema = schema[0]
|
1321 |
+
if obj is None:
|
1322 |
+
return None
|
1323 |
+
else:
|
1324 |
+
if len(obj) > 0:
|
1325 |
+
for first_elmt in obj:
|
1326 |
+
if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
|
1327 |
+
break
|
1328 |
+
if decode_nested_example(sub_schema, first_elmt) != first_elmt:
|
1329 |
+
return [decode_nested_example(sub_schema, o) for o in obj]
|
1330 |
+
return list(obj)
|
1331 |
+
elif isinstance(schema, Sequence):
|
1332 |
+
# We allow to reverse list of dict => dict of list for compatibility with tfds
|
1333 |
+
if isinstance(schema.feature, dict):
|
1334 |
+
return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
|
1335 |
+
else:
|
1336 |
+
return decode_nested_example([schema.feature], obj)
|
1337 |
+
# Object with special decoding:
|
1338 |
+
elif isinstance(schema, (Audio, Image)):
|
1339 |
+
# we pass the token to read and decode files from private repositories in streaming mode
|
1340 |
+
if obj is not None and schema.decode:
|
1341 |
+
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
|
1342 |
+
return obj
|
1343 |
+
|
1344 |
+
|
1345 |
+
_FEATURE_TYPES: Dict[str, FeatureType] = {
|
1346 |
+
Value.__name__: Value,
|
1347 |
+
ClassLabel.__name__: ClassLabel,
|
1348 |
+
Translation.__name__: Translation,
|
1349 |
+
TranslationVariableLanguages.__name__: TranslationVariableLanguages,
|
1350 |
+
Sequence.__name__: Sequence,
|
1351 |
+
Array2D.__name__: Array2D,
|
1352 |
+
Array3D.__name__: Array3D,
|
1353 |
+
Array4D.__name__: Array4D,
|
1354 |
+
Array5D.__name__: Array5D,
|
1355 |
+
Audio.__name__: Audio,
|
1356 |
+
Image.__name__: Image,
|
1357 |
+
}
|
1358 |
+
|
1359 |
+
|
1360 |
+
@experimental
|
1361 |
+
def register_feature(
|
1362 |
+
feature_cls: type,
|
1363 |
+
feature_type: str,
|
1364 |
+
):
|
1365 |
+
"""
|
1366 |
+
Register a Feature object using a name and class.
|
1367 |
+
This function must be used on a Feature class.
|
1368 |
+
"""
|
1369 |
+
if feature_type in _FEATURE_TYPES:
|
1370 |
+
logger.warning(
|
1371 |
+
f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
|
1372 |
+
)
|
1373 |
+
_FEATURE_TYPES[feature_type] = feature_cls
|
1374 |
+
|
1375 |
+
|
1376 |
+
def generate_from_dict(obj: Any):
|
1377 |
+
"""Regenerate the nested feature object from a deserialized dict.
|
1378 |
+
We use the '_type' fields to get the dataclass name to load.
|
1379 |
+
|
1380 |
+
generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
|
1381 |
+
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
|
1382 |
+
a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
|
1383 |
+
:meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
|
1384 |
+
mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
|
1385 |
+
that :class:`Value` automatically performs.
|
1386 |
+
"""
|
1387 |
+
# Nested structures: we allow dict, list/tuples, sequences
|
1388 |
+
if isinstance(obj, list):
|
1389 |
+
return [generate_from_dict(value) for value in obj]
|
1390 |
+
# Otherwise we have a dict or a dataclass
|
1391 |
+
if "_type" not in obj or isinstance(obj["_type"], dict):
|
1392 |
+
return {key: generate_from_dict(value) for key, value in obj.items()}
|
1393 |
+
obj = dict(obj)
|
1394 |
+
_type = obj.pop("_type")
|
1395 |
+
class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
|
1396 |
+
|
1397 |
+
if class_type is None:
|
1398 |
+
raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
|
1399 |
+
|
1400 |
+
if class_type == Sequence:
|
1401 |
+
return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
|
1402 |
+
|
1403 |
+
field_names = {f.name for f in fields(class_type)}
|
1404 |
+
return class_type(**{k: v for k, v in obj.items() if k in field_names})
|
1405 |
+
|
1406 |
+
|
1407 |
+
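A sketch of the dict round trip that `generate_from_dict` supports through `Features.from_dict` / `Features.to_dict` (the `_type` key selects the dataclass to rebuild):

```py
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
assert Features.from_dict(features.to_dict()) == features
```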
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
|
1408 |
+
"""
|
1409 |
+
generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
|
1410 |
+
a single field.
|
1411 |
+
|
1412 |
+
This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
|
1413 |
+
|
1414 |
+
This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
|
1415 |
+
full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
|
1416 |
+
"""
|
1417 |
+
if isinstance(pa_type, pa.StructType):
|
1418 |
+
return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
|
1419 |
+
elif isinstance(pa_type, pa.FixedSizeListType):
|
1420 |
+
return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
|
1421 |
+
elif isinstance(pa_type, pa.ListType):
|
1422 |
+
feature = generate_from_arrow_type(pa_type.value_type)
|
1423 |
+
if isinstance(feature, (dict, tuple, list)):
|
1424 |
+
return [feature]
|
1425 |
+
return Sequence(feature=feature)
|
1426 |
+
elif isinstance(pa_type, _ArrayXDExtensionType):
|
1427 |
+
array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
|
1428 |
+
return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
|
1429 |
+
elif isinstance(pa_type, pa.DictionaryType):
|
1430 |
+
raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
|
1431 |
+
elif isinstance(pa_type, pa.DataType):
|
1432 |
+
return Value(dtype=_arrow_to_datasets_dtype(pa_type))
|
1433 |
+
else:
|
1434 |
+
raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
|
1435 |
+
|
1436 |
+
|
1437 |
+
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
|
1438 |
+
"""Build a PyArrow ListArray from a multidimensional NumPy array"""
|
1439 |
+
arr = np.array(arr)
|
1440 |
+
values = pa.array(arr.flatten(), type=type)
|
1441 |
+
for i in range(arr.ndim - 1):
|
1442 |
+
n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
|
1443 |
+
step_offsets = arr.shape[arr.ndim - i - 1]
|
1444 |
+
offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
|
1445 |
+
values = pa.ListArray.from_arrays(offsets, values)
|
1446 |
+
return values
|
1447 |
+
|
1448 |
+
|
1449 |
+
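A sketch of `numpy_to_pyarrow_listarray` (an internal helper, imported here from this module's path): a `(2, 3)` array becomes a `ListArray` of two lists of three values, built from the flattened values plus one offsets array per extra dimension.

```py
import numpy as np
from datasets.features.features import numpy_to_pyarrow_listarray

arr = np.arange(6, dtype="int64").reshape(2, 3)
list_arr = numpy_to_pyarrow_listarray(arr)
assert list_arr.to_pylist() == [[0, 1, 2], [3, 4, 5]]
```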
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
|
1450 |
+
null_mask = np.array([arr is None for arr in l_arr])
|
1451 |
+
null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
|
1452 |
+
l_arr = [arr for arr in l_arr if arr is not None]
|
1453 |
+
offsets = np.cumsum(
|
1454 |
+
[0] + [len(arr) for arr in l_arr], dtype=object
|
1455 |
+
) # convert to dtype object to allow None insertion
|
1456 |
+
offsets = np.insert(offsets, null_indices, None)
|
1457 |
+
offsets = pa.array(offsets, type=pa.int32())
|
1458 |
+
values = pa.concat_arrays(l_arr)
|
1459 |
+
return pa.ListArray.from_arrays(offsets, values)
|
1460 |
+
|
1461 |
+
|
1462 |
+
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
|
1463 |
+
"""Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
|
1464 |
+
if len(l_arr) > 0:
|
1465 |
+
return list_of_pa_arrays_to_pyarrow_listarray(
|
1466 |
+
[numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
|
1467 |
+
)
|
1468 |
+
else:
|
1469 |
+
return pa.array([], type=type)
|
1470 |
+
|
1471 |
+
|
1472 |
+
def contains_any_np_array(data: Any):
|
1473 |
+
"""Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
|
1474 |
+
|
1475 |
+
Args:
|
1476 |
+
data (Any): Data.
|
1477 |
+
|
1478 |
+
Returns:
|
1479 |
+
bool
|
1480 |
+
"""
|
1481 |
+
if isinstance(data, np.ndarray):
|
1482 |
+
return True
|
1483 |
+
elif isinstance(data, list):
|
1484 |
+
return contains_any_np_array(first_non_null_value(data)[1])
|
1485 |
+
else:
|
1486 |
+
return False
|
1487 |
+
|
1488 |
+
|
1489 |
+
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
|
1490 |
+
"""Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
|
1491 |
+
|
1492 |
+
Args:
|
1493 |
+
data (Union[np.ndarray, List]): Data.
|
1494 |
+
type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
|
1495 |
+
|
1496 |
+
Returns:
|
1497 |
+
pa.ListArray
|
1498 |
+
"""
|
1499 |
+
if isinstance(data, np.ndarray):
|
1500 |
+
return numpy_to_pyarrow_listarray(data, type=type)
|
1501 |
+
elif isinstance(data, list):
|
1502 |
+
return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
|
1503 |
+
|
1504 |
+
|
1505 |
+
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
|
1506 |
+
"""Convert to PyArrow ListArray.
|
1507 |
+
|
1508 |
+
Args:
|
1509 |
+
data (Any): Sequence, iterable, np.ndarray or pd.Series.
|
1510 |
+
pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
|
1511 |
+
|
1512 |
+
Returns:
|
1513 |
+
pyarrow.Array
|
1514 |
+
"""
|
1515 |
+
if contains_any_np_array(data):
|
1516 |
+
return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
|
1517 |
+
else:
|
1518 |
+
return pa.array(data, pa_type.storage_dtype)
|
1519 |
+
|
1520 |
+
|
1521 |
+
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
|
1522 |
+
"""Visit a (possibly nested) feature.
|
1523 |
+
|
1524 |
+
Args:
|
1525 |
+
feature (FeatureType): the feature type to be checked
|
1526 |
+
Returns:
|
1527 |
+
visited feature (FeatureType)
|
1528 |
+
"""
|
1529 |
+
if isinstance(feature, dict):
|
1530 |
+
out = func({k: _visit(f, func) for k, f in feature.items()})
|
1531 |
+
elif isinstance(feature, (list, tuple)):
|
1532 |
+
out = func([_visit(feature[0], func)])
|
1533 |
+
elif isinstance(feature, Sequence):
|
1534 |
+
out = func(Sequence(_visit(feature.feature, func), length=feature.length))
|
1535 |
+
else:
|
1536 |
+
out = func(feature)
|
1537 |
+
return feature if out is None else out
|
1538 |
+
|
1539 |
+
|
1540 |
+
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
|
1541 |
+
"""Check if a (possibly nested) feature requires decoding.
|
1542 |
+
|
1543 |
+
Args:
|
1544 |
+
feature (FeatureType): the feature type to be checked
|
1545 |
+
ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
|
1546 |
+
of the `decode` attribute of the decodable feature types.
|
1547 |
+
Returns:
|
1548 |
+
:obj:`bool`
|
1549 |
+
"""
|
1550 |
+
if isinstance(feature, dict):
|
1551 |
+
return any(require_decoding(f) for f in feature.values())
|
1552 |
+
elif isinstance(feature, (list, tuple)):
|
1553 |
+
return require_decoding(feature[0])
|
1554 |
+
elif isinstance(feature, Sequence):
|
1555 |
+
return require_decoding(feature.feature)
|
1556 |
+
else:
|
1557 |
+
return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
|
1558 |
+
|
1559 |
+
|
1560 |
+
def require_storage_cast(feature: FeatureType) -> bool:
|
1561 |
+
"""Check if a (possibly nested) feature requires storage casting.
|
1562 |
+
|
1563 |
+
Args:
|
1564 |
+
feature (FeatureType): the feature type to be checked
|
1565 |
+
Returns:
|
1566 |
+
:obj:`bool`
|
1567 |
+
"""
|
1568 |
+
if isinstance(feature, dict):
|
1569 |
+
return any(require_storage_cast(f) for f in feature.values())
|
1570 |
+
elif isinstance(feature, (list, tuple)):
|
1571 |
+
return require_storage_cast(feature[0])
|
1572 |
+
elif isinstance(feature, Sequence):
|
1573 |
+
return require_storage_cast(feature.feature)
|
1574 |
+
else:
|
1575 |
+
return hasattr(feature, "cast_storage")
|
1576 |
+
|
1577 |
+
|
1578 |
+
def require_storage_embed(feature: FeatureType) -> bool:
|
1579 |
+
"""Check if a (possibly nested) feature requires embedding data into storage.
|
1580 |
+
|
1581 |
+
Args:
|
1582 |
+
feature (FeatureType): the feature type to be checked
|
1583 |
+
Returns:
|
1584 |
+
:obj:`bool`
|
1585 |
+
"""
|
1586 |
+
if isinstance(feature, dict):
|
1587 |
+
return any(require_storage_cast(f) for f in feature.values())
|
1588 |
+
elif isinstance(feature, (list, tuple)):
|
1589 |
+
return require_storage_cast(feature[0])
|
1590 |
+
elif isinstance(feature, Sequence):
|
1591 |
+
return require_storage_cast(feature.feature)
|
1592 |
+
else:
|
1593 |
+
return hasattr(feature, "embed_storage")
|
1594 |
+
|
1595 |
+
|
1596 |
+
def keep_features_dicts_synced(func):
|
1597 |
+
"""
|
1598 |
+
Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object
|
1599 |
+
in sync with the main dictionary.
|
1600 |
+
"""
|
1601 |
+
|
1602 |
+
@wraps(func)
|
1603 |
+
def wrapper(*args, **kwargs):
|
1604 |
+
if args:
|
1605 |
+
self: "Features" = args[0]
|
1606 |
+
args = args[1:]
|
1607 |
+
else:
|
1608 |
+
self: "Features" = kwargs.pop("self")
|
1609 |
+
out = func(self, *args, **kwargs)
|
1610 |
+
assert hasattr(self, "_column_requires_decoding")
|
1611 |
+
self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
|
1612 |
+
return out
|
1613 |
+
|
1614 |
+
wrapper._decorator_name_ = "_keep_dicts_synced"
|
1615 |
+
return wrapper
|
1616 |
+
|
1617 |
+
|
1618 |
+
class Features(dict):
|
1619 |
+
"""A special dictionary that defines the internal structure of a dataset.
|
1620 |
+
|
1621 |
+
Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
|
1622 |
+
and values are the type of that column.
|
1623 |
+
|
1624 |
+
`FieldType` can be one of the following:
|
1625 |
+
- a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
|
1626 |
+
- a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
|
1627 |
+
associated to them and will be stored as integers in the dataset.
|
1628 |
+
- a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
|
1629 |
+
features. It's possible to have nested fields of nested fields in an arbitrary manner.
|
1630 |
+
- a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
|
1631 |
+
`list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
|
1632 |
+
type hosted in this list.
|
1633 |
+
|
1634 |
+
<Tip>
|
1635 |
+
|
1636 |
+
A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
|
1637 |
+
lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
|
1638 |
+
unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
|
1639 |
+
[`~datasets.Sequence`].
|
1640 |
+
|
1641 |
+
</Tip>
|
1642 |
+
|
1643 |
+
- a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
|
1644 |
+
- an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
|
1645 |
+
to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
|
1646 |
+
- an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
|
1647 |
+
or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
|
1648 |
+
- [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
|
1649 |
+
"""
|
1650 |
+
|
1651 |
+
def __init__(*args, **kwargs):
|
1652 |
+
# self not in the signature to allow passing self as a kwarg
|
1653 |
+
if not args:
|
1654 |
+
raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
|
1655 |
+
self, *args = args
|
1656 |
+
super(Features, self).__init__(*args, **kwargs)
|
1657 |
+
self._column_requires_decoding: Dict[str, bool] = {
|
1658 |
+
col: require_decoding(feature) for col, feature in self.items()
|
1659 |
+
}
|
1660 |
+
|
1661 |
+
__setitem__ = keep_features_dicts_synced(dict.__setitem__)
|
1662 |
+
__delitem__ = keep_features_dicts_synced(dict.__delitem__)
|
1663 |
+
update = keep_features_dicts_synced(dict.update)
|
1664 |
+
setdefault = keep_features_dicts_synced(dict.setdefault)
|
1665 |
+
pop = keep_features_dicts_synced(dict.pop)
|
1666 |
+
popitem = keep_features_dicts_synced(dict.popitem)
|
1667 |
+
clear = keep_features_dicts_synced(dict.clear)
|
1668 |
+
|
1669 |
+
def __reduce__(self):
|
1670 |
+
return Features, (dict(self),)
|
1671 |
+
|
1672 |
+
@property
|
1673 |
+
def type(self):
|
1674 |
+
"""
|
1675 |
+
Features field types.
|
1676 |
+
|
1677 |
+
Returns:
|
1678 |
+
:obj:`pyarrow.DataType`
|
1679 |
+
"""
|
1680 |
+
return get_nested_type(self)
|
1681 |
+
|
1682 |
+
@property
|
1683 |
+
def arrow_schema(self):
|
1684 |
+
"""
|
1685 |
+
Features schema.
|
1686 |
+
|
1687 |
+
Returns:
|
1688 |
+
:obj:`pyarrow.Schema`
|
1689 |
+
"""
|
1690 |
+
hf_metadata = {"info": {"features": self.to_dict()}}
|
1691 |
+
return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
|
1692 |
+
|
1693 |
+
@classmethod
|
1694 |
+
def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
|
1695 |
+
"""
|
1696 |
+
Construct [`Features`] from Arrow Schema.
|
1697 |
+
It also checks the schema metadata for Hugging Face Datasets features.
|
1698 |
+
Non-nullable fields are not supported and set to nullable.
|
1699 |
+
|
1700 |
+
Args:
|
1701 |
+
pa_schema (`pyarrow.Schema`):
|
1702 |
+
Arrow Schema.
|
1703 |
+
|
1704 |
+
Returns:
|
1705 |
+
[`Features`]
|
1706 |
+
"""
|
1707 |
+
# try to load features from the arrow schema metadata
|
1708 |
+
metadata_features = Features()
|
1709 |
+
if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
|
1710 |
+
metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
|
1711 |
+
if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
|
1712 |
+
metadata_features = Features.from_dict(metadata["info"]["features"])
|
1713 |
+
metadata_features_schema = metadata_features.arrow_schema
|
1714 |
+
obj = {
|
1715 |
+
field.name: (
|
1716 |
+
metadata_features[field.name]
|
1717 |
+
if field.name in metadata_features and metadata_features_schema.field(field.name) == field
|
1718 |
+
else generate_from_arrow_type(field.type)
|
1719 |
+
)
|
1720 |
+
for field in pa_schema
|
1721 |
+
}
|
1722 |
+
return cls(**obj)
|
1723 |
+
|
1724 |
+
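A sketch of `Features.from_arrow_schema` on a plain Arrow schema with no Hugging Face metadata attached, where every field type is inferred via `generate_from_arrow_type`:

```py
import pyarrow as pa
from datasets import Features

schema = pa.schema({"id": pa.int64(), "tags": pa.list_(pa.string())})
print(Features.from_arrow_schema(schema))
# {'id': Value(dtype='int64', id=None), 'tags': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}
```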
@classmethod
|
1725 |
+
def from_dict(cls, dic) -> "Features":
|
1726 |
+
"""
|
1727 |
+
Construct [`Features`] from dict.
|
1728 |
+
|
1729 |
+
Regenerate the nested feature object from a deserialized dict.
|
1730 |
+
We use the `_type` key to infer the dataclass name of the feature `FieldType`.
|
1731 |
+
|
1732 |
+
It allows for a convenient constructor syntax
|
1733 |
+
to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
|
1734 |
+
a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
|
1735 |
+
[`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
|
1736 |
+
any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
|
1737 |
+
dtypes that [`Value`] automatically performs.
|
1738 |
+
|
1739 |
+
Args:
|
1740 |
+
dic (`dict[str, Any]`):
|
1741 |
+
Python dictionary.
|
1742 |
+
|
1743 |
+
Returns:
|
1744 |
+
`Features`
|
1745 |
+
|
1746 |
+
Example::
|
1747 |
+
>>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
|
1748 |
+
{'_type': Value(dtype='string', id=None)}
|
1749 |
+
"""
|
1750 |
+
obj = generate_from_dict(dic)
|
1751 |
+
return cls(**obj)
|
1752 |
+
|
1753 |
+
def to_dict(self):
|
1754 |
+
return asdict(self)
|
1755 |
+
|
1756 |
+
def _to_yaml_list(self) -> list:
|
1757 |
+
# we compute the YAML list from the dict representation that is used for JSON dump
|
1758 |
+
yaml_data = self.to_dict()
|
1759 |
+
|
1760 |
+
def simplify(feature: dict) -> dict:
|
1761 |
+
if not isinstance(feature, dict):
|
1762 |
+
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
|
1763 |
+
|
1764 |
+
#
|
1765 |
+
# sequence: -> sequence: int32
|
1766 |
+
# dtype: int32 ->
|
1767 |
+
#
|
1768 |
+
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
|
1769 |
+
feature["sequence"] = feature["sequence"]["dtype"]
|
1770 |
+
|
1771 |
+
#
|
1772 |
+
# sequence: -> sequence:
|
1773 |
+
# struct: -> - name: foo
|
1774 |
+
# - name: foo -> dtype: int32
|
1775 |
+
# dtype: int32 ->
|
1776 |
+
#
|
1777 |
+
if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
|
1778 |
+
feature["sequence"] = feature["sequence"]["struct"]
|
1779 |
+
|
1780 |
+
#
|
1781 |
+
# list: -> list: int32
|
1782 |
+
# dtype: int32 ->
|
1783 |
+
#
|
1784 |
+
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
|
1785 |
+
feature["list"] = feature["list"]["dtype"]
|
1786 |
+
|
1787 |
+
#
|
1788 |
+
# list: -> list:
|
1789 |
+
# struct: -> - name: foo
|
1790 |
+
# - name: foo -> dtype: int32
|
1791 |
+
# dtype: int32 ->
|
1792 |
+
#
|
1793 |
+
if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
|
1794 |
+
feature["list"] = feature["list"]["struct"]
|
1795 |
+
|
1796 |
+
#
|
1797 |
+
# class_label: -> class_label:
|
1798 |
+
# names: -> names:
|
1799 |
+
# - negative -> '0': negative
|
1800 |
+
# - positive -> '1': positive
|
1801 |
+
#
|
1802 |
+
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
|
1803 |
+
# server-side requirement: keys must be strings
|
1804 |
+
feature["class_label"]["names"] = {
|
1805 |
+
str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
|
1806 |
+
}
|
1807 |
+
return feature
|
1808 |
+
|
1809 |
+
def to_yaml_inner(obj: Union[dict, list]) -> dict:
|
1810 |
+
if isinstance(obj, dict):
|
1811 |
+
_type = obj.pop("_type", None)
|
1812 |
+
if _type == "Sequence":
|
1813 |
+
_feature = obj.pop("feature")
|
1814 |
+
return simplify({"sequence": to_yaml_inner(_feature), **obj})
|
1815 |
+
elif _type == "Value":
|
1816 |
+
return obj
|
1817 |
+
elif _type and not obj:
|
1818 |
+
return {"dtype": camelcase_to_snakecase(_type)}
|
1819 |
+
elif _type:
|
1820 |
+
return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
|
1821 |
+
else:
|
1822 |
+
return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
|
1823 |
+
elif isinstance(obj, list):
|
1824 |
+
return simplify({"list": simplify(to_yaml_inner(obj[0]))})
|
1825 |
+
elif isinstance(obj, tuple):
|
1826 |
+
return to_yaml_inner(list(obj))
|
1827 |
+
else:
|
1828 |
+
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
|
1829 |
+
|
1830 |
+
def to_yaml_types(obj: dict) -> dict:
|
1831 |
+
if isinstance(obj, dict):
|
1832 |
+
return {k: to_yaml_types(v) for k, v in obj.items()}
|
1833 |
+
elif isinstance(obj, list):
|
1834 |
+
return [to_yaml_types(v) for v in obj]
|
1835 |
+
elif isinstance(obj, tuple):
|
1836 |
+
return to_yaml_types(list(obj))
|
1837 |
+
else:
|
1838 |
+
return obj
|
1839 |
+
|
1840 |
+
return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
|
1841 |
+
|
1842 |
+
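A sketch of the YAML round trip implemented by `_to_yaml_list` / `_from_yaml_list` (both internal; the exact list layout shown in the comment is an approximation of the dataset-card format):

```py
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
yaml_list = features._to_yaml_list()
# roughly: [{'name': 'text', 'dtype': 'string'},
#           {'name': 'label', 'dtype': {'class_label': {'names': {'0': 'neg', '1': 'pos'}}}}]
assert Features._from_yaml_list(yaml_list) == features
```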
@classmethod
|
1843 |
+
def _from_yaml_list(cls, yaml_data: list) -> "Features":
|
1844 |
+
yaml_data = copy.deepcopy(yaml_data)
|
1845 |
+
|
1846 |
+
# we convert the list obtained from YAML data into the dict representation that is used for JSON dump
|
1847 |
+
|
1848 |
+
def unsimplify(feature: dict) -> dict:
|
1849 |
+
if not isinstance(feature, dict):
|
1850 |
+
raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
|
1851 |
+
#
|
1852 |
+
# sequence: int32 -> sequence:
|
1853 |
+
# -> dtype: int32
|
1854 |
+
#
|
1855 |
+
if isinstance(feature.get("sequence"), str):
|
1856 |
+
feature["sequence"] = {"dtype": feature["sequence"]}
|
1857 |
+
#
|
1858 |
+
# list: int32 -> list:
|
1859 |
+
# -> dtype: int32
|
1860 |
+
#
|
1861 |
+
if isinstance(feature.get("list"), str):
|
1862 |
+
feature["list"] = {"dtype": feature["list"]}
|
1863 |
+
|
1864 |
+
#
|
1865 |
+
# class_label: -> class_label:
|
1866 |
+
# names: -> names:
|
1867 |
+
# '0': negative -> - negative
|
1868 |
+
# '1': positive -> - positive
|
1869 |
+
#
|
1870 |
+
if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
|
1871 |
+
label_ids = sorted(feature["class_label"]["names"], key=int)
|
1872 |
+
if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
|
1873 |
+
raise ValueError(
|
1874 |
+
f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
|
1875 |
+
)
|
1876 |
+
feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
|
1877 |
+
return feature
|
1878 |
+
|
1879 |
+
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
|
1880 |
+
if isinstance(obj, dict):
|
1881 |
+
if not obj:
|
1882 |
+
return {}
|
1883 |
+
_type = next(iter(obj))
|
1884 |
+
if _type == "sequence":
|
1885 |
+
_feature = unsimplify(obj).pop(_type)
|
1886 |
+
return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
|
1887 |
+
if _type == "list":
|
1888 |
+
return [from_yaml_inner(unsimplify(obj)[_type])]
|
1889 |
+
if _type == "struct":
|
1890 |
+
return from_yaml_inner(obj["struct"])
|
1891 |
+
elif _type == "dtype":
|
1892 |
+
if isinstance(obj["dtype"], str):
|
1893 |
+
# e.g. int32, float64, string, audio, image
|
1894 |
+
try:
|
1895 |
+
Value(obj["dtype"])
|
1896 |
+
return {**obj, "_type": "Value"}
|
1897 |
+
except ValueError:
|
1898 |
+
# e.g. Audio, Image, ArrayXD
|
1899 |
+
return {"_type": snakecase_to_camelcase(obj["dtype"])}
|
1900 |
+
else:
|
1901 |
+
return from_yaml_inner(obj["dtype"])
|
1902 |
+
else:
|
1903 |
+
return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
|
1904 |
+
elif isinstance(obj, list):
|
1905 |
+
names = [_feature.pop("name") for _feature in obj]
|
1906 |
+
return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
|
1907 |
+
else:
|
1908 |
+
raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
|
1909 |
+
|
1910 |
+
return cls.from_dict(from_yaml_inner(yaml_data))
|
1911 |
+
|
1912 |
+
def encode_example(self, example):
|
1913 |
+
"""
|
1914 |
+
Encode example into a format for Arrow.
|
1915 |
+
|
1916 |
+
Args:
|
1917 |
+
example (`dict[str, Any]`):
|
1918 |
+
Data in a Dataset row.
|
1919 |
+
|
1920 |
+
Returns:
|
1921 |
+
`dict[str, Any]`
|
1922 |
+
"""
|
1923 |
+
example = cast_to_python_objects(example)
|
1924 |
+
return encode_nested_example(self, example)
|
1925 |
+
|
1926 |
+
def encode_column(self, column, column_name: str):
|
1927 |
+
"""
|
1928 |
+
Encode column into a format for Arrow.
|
1929 |
+
|
1930 |
+
Args:
|
1931 |
+
column (`list[Any]`):
|
1932 |
+
Data in a Dataset column.
|
1933 |
+
column_name (`str`):
|
1934 |
+
Dataset column name.
|
1935 |
+
|
1936 |
+
Returns:
|
1937 |
+
`list[Any]`
|
1938 |
+
"""
|
1939 |
+
column = cast_to_python_objects(column)
|
1940 |
+
return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
|
1941 |
+
|
1942 |
+
def encode_batch(self, batch):
|
1943 |
+
"""
|
1944 |
+
Encode batch into a format for Arrow.
|
1945 |
+
|
1946 |
+
Args:
|
1947 |
+
batch (`dict[str, list[Any]]`):
|
1948 |
+
Data in a Dataset batch.
|
1949 |
+
|
1950 |
+
Returns:
|
1951 |
+
`dict[str, list[Any]]`
|
1952 |
+
"""
|
1953 |
+
encoded_batch = {}
|
1954 |
+
if set(batch) != set(self):
|
1955 |
+
raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
|
1956 |
+
for key, column in batch.items():
|
1957 |
+
column = cast_to_python_objects(column)
|
1958 |
+
encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
|
1959 |
+
return encoded_batch
|
1960 |
+
|
1961 |
+
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
|
1962 |
+
"""Decode example with custom feature decoding.
|
1963 |
+
|
1964 |
+
Args:
|
1965 |
+
example (`dict[str, Any]`):
|
1966 |
+
Dataset row data.
|
1967 |
+
token_per_repo_id (`dict`, *optional*):
|
1968 |
+
To access and decode audio or image files from private repositories on the Hub, you can pass
|
1969 |
+
a dictionary `repo_id (str) -> token (bool or str)`.
|
1970 |
+
|
1971 |
+
Returns:
|
1972 |
+
`dict[str, Any]`
|
1973 |
+
"""
|
1974 |
+
|
1975 |
+
return {
|
1976 |
+
column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
|
1977 |
+
if self._column_requires_decoding[column_name]
|
1978 |
+
else value
|
1979 |
+
for column_name, (feature, value) in zip_dict(
|
1980 |
+
{key: value for key, value in self.items() if key in example}, example
|
1981 |
+
)
|
1982 |
+
}
|
1983 |
+
|
1984 |
+
def decode_column(self, column: list, column_name: str):
|
1985 |
+
"""Decode column with custom feature decoding.
|
1986 |
+
|
1987 |
+
Args:
|
1988 |
+
column (`list[Any]`):
|
1989 |
+
Dataset column data.
|
1990 |
+
column_name (`str`):
|
1991 |
+
Dataset column name.
|
1992 |
+
|
1993 |
+
Returns:
|
1994 |
+
`list[Any]`
|
1995 |
+
"""
|
1996 |
+
return (
|
1997 |
+
[decode_nested_example(self[column_name], value) if value is not None else None for value in column]
|
1998 |
+
if self._column_requires_decoding[column_name]
|
1999 |
+
else column
|
2000 |
+
)
|
2001 |
+
|
2002 |
+
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
|
2003 |
+
"""Decode batch with custom feature decoding.
|
2004 |
+
|
2005 |
+
Args:
|
2006 |
+
batch (`dict[str, list[Any]]`):
|
2007 |
+
Dataset batch data.
|
2008 |
+
token_per_repo_id (`dict`, *optional*):
|
2009 |
+
To access and decode audio or image files from private repositories on the Hub, you can pass
|
2010 |
+
a dictionary repo_id (str) -> token (bool or str)
|
2011 |
+
|
2012 |
+
Returns:
|
2013 |
+
`dict[str, list[Any]]`
|
2014 |
+
"""
|
2015 |
+
decoded_batch = {}
|
2016 |
+
for column_name, column in batch.items():
|
2017 |
+
decoded_batch[column_name] = (
|
2018 |
+
[
|
2019 |
+
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
|
2020 |
+
if value is not None
|
2021 |
+
else None
|
2022 |
+
for value in column
|
2023 |
+
]
|
2024 |
+
if self._column_requires_decoding[column_name]
|
2025 |
+
else column
|
2026 |
+
)
|
2027 |
+
return decoded_batch
|
2028 |
+
|
2029 |
+
def copy(self) -> "Features":
|
2030 |
+
"""
|
2031 |
+
Make a deep copy of [`Features`].
|
2032 |
+
|
2033 |
+
Returns:
|
2034 |
+
[`Features`]
|
2035 |
+
|
2036 |
+
Example:
|
2037 |
+
|
2038 |
+
```py
|
2039 |
+
>>> from datasets import load_dataset
|
2040 |
+
>>> ds = load_dataset("rotten_tomatoes", split="train")
|
2041 |
+
>>> copy_of_features = ds.features.copy()
|
2042 |
+
>>> copy_of_features
|
2043 |
+
{'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
|
2044 |
+
'text': Value(dtype='string', id=None)}
|
2045 |
+
```
|
2046 |
+
"""
|
2047 |
+
return copy.deepcopy(self)
|
2048 |
+
|
2049 |
+
def reorder_fields_as(self, other: "Features") -> "Features":
|
2050 |
+
"""
|
2051 |
+
Reorder Features fields to match the field order of other [`Features`].
|
2052 |
+
|
2053 |
+
The order of the fields is important since it matters for the underlying arrow data.
|
2054 |
+
Re-ordering the fields allows to make the underlying arrow data type match.
|
2055 |
+
|
2056 |
+
Args:
|
2057 |
+
other ([`Features`]):
|
2058 |
+
The other [`Features`] to align with.
|
2059 |
+
|
2060 |
+
Returns:
|
2061 |
+
[`Features`]
|
2062 |
+
|
2063 |
+
Example::
|
2064 |
+
|
2065 |
+
>>> from datasets import Features, Sequence, Value
|
2066 |
+
>>> # let's say we have to features with a different order of nested fields (for a and b for example)
|
2067 |
+
>>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
|
2068 |
+
>>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
|
2069 |
+
>>> assert f1.type != f2.type
|
2070 |
+
>>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match
|
2071 |
+
>>> f1.reorder_fields_as(f2)
|
2072 |
+
{'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
|
2073 |
+
>>> assert f1.reorder_fields_as(f2).type == f2.type
|
2074 |
+
"""
|
2075 |
+
|
2076 |
+
def recursive_reorder(source, target, stack=""):
|
2077 |
+
stack_position = " at " + stack[1:] if stack else ""
|
2078 |
+
if isinstance(target, Sequence):
|
2079 |
+
target = target.feature
|
2080 |
+
if isinstance(target, dict):
|
2081 |
+
target = {k: [v] for k, v in target.items()}
|
2082 |
+
else:
|
2083 |
+
target = [target]
|
2084 |
+
if isinstance(source, Sequence):
|
2085 |
+
source, id_, length = source.feature, source.id, source.length
|
2086 |
+
if isinstance(source, dict):
|
2087 |
+
source = {k: [v] for k, v in source.items()}
|
2088 |
+
reordered = recursive_reorder(source, target, stack)
|
2089 |
+
return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
|
2090 |
+
else:
|
2091 |
+
source = [source]
|
2092 |
+
reordered = recursive_reorder(source, target, stack)
|
2093 |
+
return Sequence(reordered[0], id=id_, length=length)
|
2094 |
+
elif isinstance(source, dict):
|
2095 |
+
if not isinstance(target, dict):
|
2096 |
+
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
|
2097 |
+
if sorted(source) != sorted(target):
|
2098 |
+
message = (
|
2099 |
+
f"Keys mismatch: between {source} (source) and {target} (target).\n"
|
2100 |
+
f"{source.keys()-target.keys()} are missing from target "
|
2101 |
+
f"and {target.keys()-source.keys()} are missing from source" + stack_position
|
2102 |
+
)
|
2103 |
+
raise ValueError(message)
|
2104 |
+
return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
|
2105 |
+
elif isinstance(source, list):
|
2106 |
+
if not isinstance(target, list):
|
2107 |
+
raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
|
2108 |
+
if len(source) != len(target):
|
2109 |
+
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
|
2110 |
+
return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
|
2111 |
+
else:
|
2112 |
+
return source
|
2113 |
+
|
2114 |
+
return Features(recursive_reorder(self, other))
|
2115 |
+
|
2116 |
+
def flatten(self, max_depth=16) -> "Features":
|
2117 |
+
"""Flatten the features. Every dictionary column is removed and is replaced by
|
2118 |
+
all the subfields it contains. The new fields are named by concatenating the
|
2119 |
+
name of the original column and the subfield name like this: `<original>.<subfield>`.
|
2120 |
+
|
2121 |
+
If a column contains nested dictionaries, then all the lower-level subfields names are
|
2122 |
+
also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
|
2123 |
+
|
2124 |
+
Returns:
|
2125 |
+
[`Features`]:
|
2126 |
+
The flattened features.
|
2127 |
+
|
2128 |
+
Example:
|
2129 |
+
|
2130 |
+
```py
|
2131 |
+
>>> from datasets import load_dataset
|
2132 |
+
>>> ds = load_dataset("squad", split="train")
|
2133 |
+
>>> ds.features.flatten()
|
2134 |
+
{'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
|
2135 |
+
'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
|
2136 |
+
'context': Value(dtype='string', id=None),
|
2137 |
+
'id': Value(dtype='string', id=None),
|
2138 |
+
'question': Value(dtype='string', id=None),
|
2139 |
+
'title': Value(dtype='string', id=None)}
|
2140 |
+
```
|
2141 |
+
"""
|
2142 |
+
for depth in range(1, max_depth):
|
2143 |
+
no_change = True
|
2144 |
+
flattened = self.copy()
|
2145 |
+
for column_name, subfeature in self.items():
|
2146 |
+
if isinstance(subfeature, dict):
|
2147 |
+
no_change = False
|
2148 |
+
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
|
2149 |
+
del flattened[column_name]
|
2150 |
+
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
|
2151 |
+
no_change = False
|
2152 |
+
flattened.update(
|
2153 |
+
{
|
2154 |
+
f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
|
2155 |
+
for k, v in subfeature.feature.items()
|
2156 |
+
}
|
2157 |
+
)
|
2158 |
+
del flattened[column_name]
|
2159 |
+
elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
|
2160 |
+
no_change = False
|
2161 |
+
flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
|
2162 |
+
del flattened[column_name]
|
2163 |
+
self = flattened
|
2164 |
+
if no_change:
|
2165 |
+
break
|
2166 |
+
return self
|
2167 |
+
|
2168 |
+
|
2169 |
+
def _align_features(features_list: List[Features]) -> List[Features]:
|
2170 |
+
"""Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
|
2171 |
+
name2feature = {}
|
2172 |
+
for features in features_list:
|
2173 |
+
for k, v in features.items():
|
2174 |
+
if k in name2feature and isinstance(v, dict):
|
2175 |
+
# Recursively align features.
|
2176 |
+
name2feature[k] = _align_features([name2feature[k], v])[0]
|
2177 |
+
elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
|
2178 |
+
name2feature[k] = v
|
2179 |
+
|
2180 |
+
return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
|
2181 |
+
|
2182 |
+
|
2183 |
+
def _check_if_features_can_be_aligned(features_list: List[Features]):
|
2184 |
+
"""Check if the dictionaries of features can be aligned.
|
2185 |
+
|
2186 |
+
Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`.
|
2187 |
+
"""
|
2188 |
+
name2feature = {}
|
2189 |
+
for features in features_list:
|
2190 |
+
for k, v in features.items():
|
2191 |
+
if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
|
2192 |
+
name2feature[k] = v
|
2193 |
+
|
2194 |
+
for features in features_list:
|
2195 |
+
for k, v in features.items():
|
2196 |
+
if isinstance(v, dict) and isinstance(name2feature[k], dict):
|
2197 |
+
# Deep checks for structure.
|
2198 |
+
_check_if_features_can_be_aligned([name2feature[k], v])
|
2199 |
+
elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
|
2200 |
+
raise ValueError(
|
2201 |
+
f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").'
|
2202 |
+
)
|
llmeval-env/lib/python3.10/site-packages/datasets/features/image.py
ADDED
@@ -0,0 +1,383 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import warnings
|
4 |
+
from dataclasses import dataclass, field
|
5 |
+
from io import BytesIO
|
6 |
+
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
import pyarrow as pa
|
10 |
+
|
11 |
+
from .. import config
|
12 |
+
from ..download.download_config import DownloadConfig
|
13 |
+
from ..table import array_cast
|
14 |
+
from ..utils.file_utils import is_local_path, xopen
|
15 |
+
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
|
16 |
+
|
17 |
+
|
18 |
+
if TYPE_CHECKING:
|
19 |
+
import PIL.Image
|
20 |
+
|
21 |
+
from .features import FeatureType
|
22 |
+
|
23 |
+
|
24 |
+
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
|
25 |
+
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
|
26 |
+
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
|
27 |
+
_VALID_IMAGE_ARRAY_DTPYES = [
|
28 |
+
np.dtype("|b1"),
|
29 |
+
np.dtype("|u1"),
|
30 |
+
np.dtype("<u2"),
|
31 |
+
np.dtype(">u2"),
|
32 |
+
np.dtype("<i2"),
|
33 |
+
np.dtype(">i2"),
|
34 |
+
np.dtype("<u4"),
|
35 |
+
np.dtype(">u4"),
|
36 |
+
np.dtype("<i4"),
|
37 |
+
np.dtype(">i4"),
|
38 |
+
np.dtype("<f4"),
|
39 |
+
np.dtype(">f4"),
|
40 |
+
np.dtype("<f8"),
|
41 |
+
np.dtype(">f8"),
|
42 |
+
]
|
43 |
+
|
44 |
+
|
45 |
+
@dataclass
|
46 |
+
class Image:
|
47 |
+
"""Image [`Feature`] to read image data from an image file.
|
48 |
+
|
49 |
+
Input: The Image feature accepts as input:
|
50 |
+
- A `str`: Absolute path to the image file (i.e. random access is allowed).
|
51 |
+
- A `dict` with the keys:
|
52 |
+
|
53 |
+
- `path`: String with relative path of the image file to the archive file.
|
54 |
+
- `bytes`: Bytes of the image file.
|
55 |
+
|
56 |
+
This is useful for archived files with sequential access.
|
57 |
+
|
58 |
+
- An `np.ndarray`: NumPy array representing an image.
|
59 |
+
- A `PIL.Image.Image`: PIL image object.
|
60 |
+
|
61 |
+
Args:
|
62 |
+
mode (`str`, *optional*):
|
63 |
+
The mode to convert the image to. If `None`, the native mode of the image is used.
|
64 |
+
decode (`bool`, defaults to `True`):
|
65 |
+
Whether to decode the image data. If `False`,
|
66 |
+
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
|
67 |
+
|
68 |
+
Examples:
|
69 |
+
|
70 |
+
```py
|
71 |
+
>>> from datasets import load_dataset, Image
|
72 |
+
>>> ds = load_dataset("beans", split="train")
|
73 |
+
>>> ds.features["image"]
|
74 |
+
Image(decode=True, id=None)
|
75 |
+
>>> ds[0]["image"]
|
76 |
+
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
|
77 |
+
>>> ds = ds.cast_column('image', Image(decode=False))
|
78 |
+
{'bytes': None,
|
79 |
+
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
|
80 |
+
```
|
81 |
+
"""
|
82 |
+
|
83 |
+
mode: Optional[str] = None
|
84 |
+
decode: bool = True
|
85 |
+
id: Optional[str] = None
|
86 |
+
# Automatically constructed
|
87 |
+
dtype: ClassVar[str] = "PIL.Image.Image"
|
88 |
+
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
|
89 |
+
_type: str = field(default="Image", init=False, repr=False)
|
90 |
+
|
91 |
+
def __call__(self):
|
92 |
+
return self.pa_type
|
93 |
+
|
94 |
+
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
|
95 |
+
"""Encode example into a format for Arrow.
|
96 |
+
|
97 |
+
Args:
|
98 |
+
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
|
99 |
+
Data passed as input to Image feature.
|
100 |
+
|
101 |
+
Returns:
|
102 |
+
`dict` with "path" and "bytes" fields
|
103 |
+
"""
|
104 |
+
if config.PIL_AVAILABLE:
|
105 |
+
import PIL.Image
|
106 |
+
else:
|
107 |
+
raise ImportError("To support encoding images, please install 'Pillow'.")
|
108 |
+
|
109 |
+
if isinstance(value, list):
|
110 |
+
value = np.array(value)
|
111 |
+
|
112 |
+
if isinstance(value, str):
|
113 |
+
return {"path": value, "bytes": None}
|
114 |
+
elif isinstance(value, bytes):
|
115 |
+
return {"path": None, "bytes": value}
|
116 |
+
elif isinstance(value, np.ndarray):
|
117 |
+
# convert the image array to PNG/TIFF bytes
|
118 |
+
return encode_np_array(value)
|
119 |
+
elif isinstance(value, PIL.Image.Image):
|
120 |
+
# convert the PIL image to bytes (default format is PNG/TIFF)
|
121 |
+
return encode_pil_image(value)
|
122 |
+
elif value.get("path") is not None and os.path.isfile(value["path"]):
|
123 |
+
# we set "bytes": None to not duplicate the data if they're already available locally
|
124 |
+
return {"bytes": None, "path": value.get("path")}
|
125 |
+
elif value.get("bytes") is not None or value.get("path") is not None:
|
126 |
+
# store the image bytes, and path is used to infer the image format using the file extension
|
127 |
+
return {"bytes": value.get("bytes"), "path": value.get("path")}
|
128 |
+
else:
|
129 |
+
raise ValueError(
|
130 |
+
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
|
131 |
+
)
|
132 |
+
|
133 |
+
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
|
134 |
+
"""Decode example image file into image data.
|
135 |
+
|
136 |
+
Args:
|
137 |
+
value (`str` or `dict`):
|
138 |
+
A string with the absolute image file path, a dictionary with
|
139 |
+
keys:
|
140 |
+
|
141 |
+
- `path`: String with absolute or relative image file path.
|
142 |
+
- `bytes`: The bytes of the image file.
|
143 |
+
token_per_repo_id (`dict`, *optional*):
|
144 |
+
To access and decode
|
145 |
+
image files from private repositories on the Hub, you can pass
|
146 |
+
a dictionary repo_id (`str`) -> token (`bool` or `str`).
|
147 |
+
|
148 |
+
Returns:
|
149 |
+
`PIL.Image.Image`
|
150 |
+
"""
|
151 |
+
if not self.decode:
|
152 |
+
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
|
153 |
+
|
154 |
+
if config.PIL_AVAILABLE:
|
155 |
+
import PIL.Image
|
156 |
+
import PIL.ImageOps
|
157 |
+
else:
|
158 |
+
raise ImportError("To support decoding images, please install 'Pillow'.")
|
159 |
+
|
160 |
+
if token_per_repo_id is None:
|
161 |
+
token_per_repo_id = {}
|
162 |
+
|
163 |
+
path, bytes_ = value["path"], value["bytes"]
|
164 |
+
if bytes_ is None:
|
165 |
+
if path is None:
|
166 |
+
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
|
167 |
+
else:
|
168 |
+
if is_local_path(path):
|
169 |
+
image = PIL.Image.open(path)
|
170 |
+
else:
|
171 |
+
source_url = path.split("::")[-1]
|
172 |
+
pattern = (
|
173 |
+
config.HUB_DATASETS_URL
|
174 |
+
if source_url.startswith(config.HF_ENDPOINT)
|
175 |
+
else config.HUB_DATASETS_HFFS_URL
|
176 |
+
)
|
177 |
+
try:
|
178 |
+
repo_id = string_to_dict(source_url, pattern)["repo_id"]
|
179 |
+
token = token_per_repo_id.get(repo_id)
|
180 |
+
except ValueError:
|
181 |
+
token = None
|
182 |
+
download_config = DownloadConfig(token=token)
|
183 |
+
with xopen(path, "rb", download_config=download_config) as f:
|
184 |
+
bytes_ = BytesIO(f.read())
|
185 |
+
image = PIL.Image.open(bytes_)
|
186 |
+
else:
|
187 |
+
image = PIL.Image.open(BytesIO(bytes_))
|
188 |
+
image.load() # to avoid "Too many open files" errors
|
189 |
+
if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
|
190 |
+
image = PIL.ImageOps.exif_transpose(image)
|
191 |
+
if self.mode and self.mode != image.mode:
|
192 |
+
image = image.convert(self.mode)
|
193 |
+
return image
|
194 |
+
|
195 |
+
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
|
196 |
+
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
|
197 |
+
from .features import Value
|
198 |
+
|
199 |
+
return (
|
200 |
+
self
|
201 |
+
if self.decode
|
202 |
+
else {
|
203 |
+
"bytes": Value("binary"),
|
204 |
+
"path": Value("string"),
|
205 |
+
}
|
206 |
+
)
|
207 |
+
|
208 |
+
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
|
209 |
+
"""Cast an Arrow array to the Image arrow storage type.
|
210 |
+
The Arrow types that can be converted to the Image pyarrow storage type are:
|
211 |
+
|
212 |
+
- `pa.string()` - it must contain the "path" data
|
213 |
+
- `pa.binary()` - it must contain the image bytes
|
214 |
+
- `pa.struct({"bytes": pa.binary()})`
|
215 |
+
- `pa.struct({"path": pa.string()})`
|
216 |
+
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
|
217 |
+
- `pa.list(*)` - it must contain the image array data
|
218 |
+
|
219 |
+
Args:
|
220 |
+
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
|
221 |
+
PyArrow array to cast.
|
222 |
+
|
223 |
+
Returns:
|
224 |
+
`pa.StructArray`: Array in the Image arrow storage type, that is
|
225 |
+
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
|
226 |
+
"""
|
227 |
+
if pa.types.is_string(storage.type):
|
228 |
+
bytes_array = pa.array([None] * len(storage), type=pa.binary())
|
229 |
+
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
|
230 |
+
elif pa.types.is_binary(storage.type):
|
231 |
+
path_array = pa.array([None] * len(storage), type=pa.string())
|
232 |
+
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
|
233 |
+
elif pa.types.is_struct(storage.type):
|
234 |
+
if storage.type.get_field_index("bytes") >= 0:
|
235 |
+
bytes_array = storage.field("bytes")
|
236 |
+
else:
|
237 |
+
bytes_array = pa.array([None] * len(storage), type=pa.binary())
|
238 |
+
if storage.type.get_field_index("path") >= 0:
|
239 |
+
path_array = storage.field("path")
|
240 |
+
else:
|
241 |
+
path_array = pa.array([None] * len(storage), type=pa.string())
|
242 |
+
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
|
243 |
+
elif pa.types.is_list(storage.type):
|
244 |
+
bytes_array = pa.array(
|
245 |
+
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
|
246 |
+
type=pa.binary(),
|
247 |
+
)
|
248 |
+
path_array = pa.array([None] * len(storage), type=pa.string())
|
249 |
+
storage = pa.StructArray.from_arrays(
|
250 |
+
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
|
251 |
+
)
|
252 |
+
return array_cast(storage, self.pa_type)
|
253 |
+
|
254 |
+
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
|
255 |
+
"""Embed image files into the Arrow array.
|
256 |
+
|
257 |
+
Args:
|
258 |
+
storage (`pa.StructArray`):
|
259 |
+
PyArrow array to embed.
|
260 |
+
|
261 |
+
Returns:
|
262 |
+
`pa.StructArray`: Array in the Image arrow storage type, that is
|
263 |
+
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
|
264 |
+
"""
|
265 |
+
|
266 |
+
@no_op_if_value_is_null
|
267 |
+
def path_to_bytes(path):
|
268 |
+
with xopen(path, "rb") as f:
|
269 |
+
bytes_ = f.read()
|
270 |
+
return bytes_
|
271 |
+
|
272 |
+
bytes_array = pa.array(
|
273 |
+
[
|
274 |
+
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
|
275 |
+
for x in storage.to_pylist()
|
276 |
+
],
|
277 |
+
type=pa.binary(),
|
278 |
+
)
|
279 |
+
path_array = pa.array(
|
280 |
+
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
|
281 |
+
type=pa.string(),
|
282 |
+
)
|
283 |
+
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
|
284 |
+
return array_cast(storage, self.pa_type)
|
285 |
+
|
286 |
+
|
287 |
+
def list_image_compression_formats() -> List[str]:
|
288 |
+
if config.PIL_AVAILABLE:
|
289 |
+
import PIL.Image
|
290 |
+
else:
|
291 |
+
raise ImportError("To support encoding images, please install 'Pillow'.")
|
292 |
+
|
293 |
+
global _IMAGE_COMPRESSION_FORMATS
|
294 |
+
if _IMAGE_COMPRESSION_FORMATS is None:
|
295 |
+
PIL.Image.init()
|
296 |
+
_IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
|
297 |
+
return _IMAGE_COMPRESSION_FORMATS
|
298 |
+
|
299 |
+
|
300 |
+
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
|
301 |
+
"""Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
|
302 |
+
buffer = BytesIO()
|
303 |
+
if image.format in list_image_compression_formats():
|
304 |
+
format = image.format
|
305 |
+
else:
|
306 |
+
format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
|
307 |
+
image.save(buffer, format=format)
|
308 |
+
return buffer.getvalue()
|
309 |
+
|
310 |
+
|
311 |
+
def encode_pil_image(image: "PIL.Image.Image") -> dict:
|
312 |
+
if hasattr(image, "filename") and image.filename != "":
|
313 |
+
return {"path": image.filename, "bytes": None}
|
314 |
+
else:
|
315 |
+
return {"path": None, "bytes": image_to_bytes(image)}
|
316 |
+
|
317 |
+
|
318 |
+
def encode_np_array(array: np.ndarray) -> dict:
|
319 |
+
if config.PIL_AVAILABLE:
|
320 |
+
import PIL.Image
|
321 |
+
else:
|
322 |
+
raise ImportError("To support encoding images, please install 'Pillow'.")
|
323 |
+
|
324 |
+
dtype = array.dtype
|
325 |
+
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
|
326 |
+
dtype_kind = dtype.kind
|
327 |
+
dtype_itemsize = dtype.itemsize
|
328 |
+
|
329 |
+
dest_dtype = None
|
330 |
+
|
331 |
+
# Multi-channel array case (only np.dtype("|u1") is allowed)
|
332 |
+
if array.shape[2:]:
|
333 |
+
if dtype_kind not in ["u", "i"]:
|
334 |
+
raise TypeError(
|
335 |
+
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
|
336 |
+
)
|
337 |
+
dest_dtype = np.dtype("|u1")
|
338 |
+
if dtype != dest_dtype:
|
339 |
+
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
|
340 |
+
# Exact match
|
341 |
+
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
|
342 |
+
dest_dtype = dtype
|
343 |
+
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
|
344 |
+
while dtype_itemsize >= 1:
|
345 |
+
dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
|
346 |
+
if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
|
347 |
+
dest_dtype = np.dtype(dtype_str)
|
348 |
+
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
|
349 |
+
break
|
350 |
+
else:
|
351 |
+
dtype_itemsize //= 2
|
352 |
+
if dest_dtype is None:
|
353 |
+
raise TypeError(
|
354 |
+
f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
|
355 |
+
)
|
356 |
+
|
357 |
+
image = PIL.Image.fromarray(array.astype(dest_dtype))
|
358 |
+
return {"path": None, "bytes": image_to_bytes(image)}
|
359 |
+
|
360 |
+
|
361 |
+
def objects_to_list_of_image_dicts(
|
362 |
+
objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
|
363 |
+
) -> List[dict]:
|
364 |
+
"""Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
|
365 |
+
if config.PIL_AVAILABLE:
|
366 |
+
import PIL.Image
|
367 |
+
else:
|
368 |
+
raise ImportError("To support encoding images, please install 'Pillow'.")
|
369 |
+
|
370 |
+
if objs:
|
371 |
+
_, obj = first_non_null_value(objs)
|
372 |
+
if isinstance(obj, str):
|
373 |
+
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
|
374 |
+
if isinstance(obj, np.ndarray):
|
375 |
+
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
|
376 |
+
return [obj_to_image_dict_func(obj) for obj in objs]
|
377 |
+
elif isinstance(obj, PIL.Image.Image):
|
378 |
+
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
|
379 |
+
return [obj_to_image_dict_func(obj) for obj in objs]
|
380 |
+
else:
|
381 |
+
return objs
|
382 |
+
else:
|
383 |
+
return objs
|
llmeval-env/lib/python3.10/site-packages/datasets/features/translation.py
ADDED
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass, field
|
2 |
+
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
|
3 |
+
|
4 |
+
import pyarrow as pa
|
5 |
+
|
6 |
+
|
7 |
+
if TYPE_CHECKING:
|
8 |
+
from .features import FeatureType
|
9 |
+
|
10 |
+
|
11 |
+
@dataclass
|
12 |
+
class Translation:
|
13 |
+
"""`FeatureConnector` for translations with fixed languages per example.
|
14 |
+
Here for compatiblity with tfds.
|
15 |
+
|
16 |
+
Args:
|
17 |
+
languages (`dict`):
|
18 |
+
A dictionary for each example mapping string language codes to string translations.
|
19 |
+
|
20 |
+
Example:
|
21 |
+
|
22 |
+
```python
|
23 |
+
>>> # At construction time:
|
24 |
+
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
|
25 |
+
>>> # During data generation:
|
26 |
+
>>> yield {
|
27 |
+
... 'en': 'the cat',
|
28 |
+
... 'fr': 'le chat',
|
29 |
+
... 'de': 'die katze'
|
30 |
+
... }
|
31 |
+
```
|
32 |
+
"""
|
33 |
+
|
34 |
+
languages: List[str]
|
35 |
+
id: Optional[str] = None
|
36 |
+
# Automatically constructed
|
37 |
+
dtype: ClassVar[str] = "dict"
|
38 |
+
pa_type: ClassVar[Any] = None
|
39 |
+
_type: str = field(default="Translation", init=False, repr=False)
|
40 |
+
|
41 |
+
def __call__(self):
|
42 |
+
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
|
43 |
+
|
44 |
+
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
|
45 |
+
"""Flatten the Translation feature into a dictionary."""
|
46 |
+
from .features import Value
|
47 |
+
|
48 |
+
return {k: Value("string") for k in sorted(self.languages)}
|
49 |
+
|
50 |
+
|
51 |
+
@dataclass
|
52 |
+
class TranslationVariableLanguages:
|
53 |
+
"""`FeatureConnector` for translations with variable languages per example.
|
54 |
+
Here for compatiblity with tfds.
|
55 |
+
|
56 |
+
Args:
|
57 |
+
languages (`dict`):
|
58 |
+
A dictionary for each example mapping string language codes to one or more string translations.
|
59 |
+
The languages present may vary from example to example.
|
60 |
+
|
61 |
+
Returns:
|
62 |
+
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
|
63 |
+
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
|
64 |
+
|
65 |
+
Example:
|
66 |
+
|
67 |
+
```python
|
68 |
+
>>> # At construction time:
|
69 |
+
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
|
70 |
+
>>> # During data generation:
|
71 |
+
>>> yield {
|
72 |
+
... 'en': 'the cat',
|
73 |
+
... 'fr': ['le chat', 'la chatte,']
|
74 |
+
... 'de': 'die katze'
|
75 |
+
... }
|
76 |
+
>>> # Tensor returned :
|
77 |
+
>>> {
|
78 |
+
... 'language': ['en', 'de', 'fr', 'fr'],
|
79 |
+
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
|
80 |
+
... }
|
81 |
+
```
|
82 |
+
"""
|
83 |
+
|
84 |
+
languages: Optional[List] = None
|
85 |
+
num_languages: Optional[int] = None
|
86 |
+
id: Optional[str] = None
|
87 |
+
# Automatically constructed
|
88 |
+
dtype: ClassVar[str] = "dict"
|
89 |
+
pa_type: ClassVar[Any] = None
|
90 |
+
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
|
91 |
+
|
92 |
+
def __post_init__(self):
|
93 |
+
self.languages = sorted(set(self.languages)) if self.languages else None
|
94 |
+
self.num_languages = len(self.languages) if self.languages else None
|
95 |
+
|
96 |
+
def __call__(self):
|
97 |
+
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
|
98 |
+
|
99 |
+
def encode_example(self, translation_dict):
|
100 |
+
lang_set = set(self.languages)
|
101 |
+
if set(translation_dict) == {"language", "translation"}:
|
102 |
+
return translation_dict
|
103 |
+
elif self.languages and set(translation_dict) - lang_set:
|
104 |
+
raise ValueError(
|
105 |
+
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
|
106 |
+
)
|
107 |
+
|
108 |
+
# Convert dictionary into tuples, splitting out cases where there are
|
109 |
+
# multiple translations for a single language.
|
110 |
+
translation_tuples = []
|
111 |
+
for lang, text in translation_dict.items():
|
112 |
+
if isinstance(text, str):
|
113 |
+
translation_tuples.append((lang, text))
|
114 |
+
else:
|
115 |
+
translation_tuples.extend([(lang, el) for el in text])
|
116 |
+
|
117 |
+
# Ensure translations are in ascending order by language code.
|
118 |
+
languages, translations = zip(*sorted(translation_tuples))
|
119 |
+
|
120 |
+
return {"language": languages, "translation": translations}
|
121 |
+
|
122 |
+
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
|
123 |
+
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
|
124 |
+
from .features import Sequence, Value
|
125 |
+
|
126 |
+
return {
|
127 |
+
"language": Sequence(Value("string")),
|
128 |
+
"translation": Sequence(Value("string")),
|
129 |
+
}
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (204 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc
ADDED
Binary file (3.07 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from typing import Optional
|
4 |
+
|
5 |
+
import pyarrow as pa
|
6 |
+
|
7 |
+
import datasets
|
8 |
+
from datasets.table import table_cast
|
9 |
+
|
10 |
+
|
11 |
+
logger = datasets.utils.logging.get_logger(__name__)
|
12 |
+
|
13 |
+
|
14 |
+
@dataclass
|
15 |
+
class ArrowConfig(datasets.BuilderConfig):
|
16 |
+
"""BuilderConfig for Arrow."""
|
17 |
+
|
18 |
+
features: Optional[datasets.Features] = None
|
19 |
+
|
20 |
+
|
21 |
+
class Arrow(datasets.ArrowBasedBuilder):
|
22 |
+
BUILDER_CONFIG_CLASS = ArrowConfig
|
23 |
+
|
24 |
+
def _info(self):
|
25 |
+
return datasets.DatasetInfo(features=self.config.features)
|
26 |
+
|
27 |
+
def _split_generators(self, dl_manager):
|
28 |
+
"""We handle string, list and dicts in datafiles"""
|
29 |
+
if not self.config.data_files:
|
30 |
+
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
|
31 |
+
dl_manager.download_config.extract_on_the_fly = True
|
32 |
+
data_files = dl_manager.download_and_extract(self.config.data_files)
|
33 |
+
if isinstance(data_files, (str, list, tuple)):
|
34 |
+
files = data_files
|
35 |
+
if isinstance(files, str):
|
36 |
+
files = [files]
|
37 |
+
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
|
38 |
+
files = [dl_manager.iter_files(file) for file in files]
|
39 |
+
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
|
40 |
+
splits = []
|
41 |
+
for split_name, files in data_files.items():
|
42 |
+
if isinstance(files, str):
|
43 |
+
files = [files]
|
44 |
+
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
|
45 |
+
files = [dl_manager.iter_files(file) for file in files]
|
46 |
+
# Infer features is they are stoed in the arrow schema
|
47 |
+
if self.info.features is None:
|
48 |
+
for file in itertools.chain.from_iterable(files):
|
49 |
+
with open(file, "rb") as f:
|
50 |
+
self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
|
51 |
+
break
|
52 |
+
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
|
53 |
+
return splits
|
54 |
+
|
55 |
+
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
|
56 |
+
if self.info.features is not None:
|
57 |
+
# more expensive cast to support nested features with keys in a different order
|
58 |
+
# allows str <-> int/float or str to Audio for example
|
59 |
+
pa_table = table_cast(pa_table, self.info.features.arrow_schema)
|
60 |
+
return pa_table
|
61 |
+
|
62 |
+
def _generate_tables(self, files):
|
63 |
+
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
|
64 |
+
with open(file, "rb") as f:
|
65 |
+
try:
|
66 |
+
for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
|
67 |
+
pa_table = pa.Table.from_batches([record_batch])
|
68 |
+
# Uncomment for debugging (will print the Arrow table size and elements)
|
69 |
+
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
|
70 |
+
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
|
71 |
+
yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
|
72 |
+
except ValueError as e:
|
73 |
+
logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
|
74 |
+
raise
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
|
3 |
+
import datasets
|
4 |
+
from datasets.tasks import AudioClassification
|
5 |
+
|
6 |
+
from ..folder_based_builder import folder_based_builder
|
7 |
+
|
8 |
+
|
9 |
+
logger = datasets.utils.logging.get_logger(__name__)
|
10 |
+
|
11 |
+
|
12 |
+
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
|
13 |
+
"""Builder Config for AudioFolder."""
|
14 |
+
|
15 |
+
drop_labels: bool = None
|
16 |
+
drop_metadata: bool = None
|
17 |
+
|
18 |
+
|
19 |
+
class AudioFolder(folder_based_builder.FolderBasedBuilder):
|
20 |
+
BASE_FEATURE = datasets.Audio
|
21 |
+
BASE_COLUMN_NAME = "audio"
|
22 |
+
BUILDER_CONFIG_CLASS = AudioFolderConfig
|
23 |
+
EXTENSIONS: List[str] # definition at the bottom of the script
|
24 |
+
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
|
25 |
+
|
26 |
+
|
27 |
+
# Obtained with:
|
28 |
+
# ```
|
29 |
+
# import soundfile as sf
|
30 |
+
#
|
31 |
+
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
|
32 |
+
#
|
33 |
+
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
|
34 |
+
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
|
35 |
+
# ```
|
36 |
+
# We intentionally do not run this code on launch because:
|
37 |
+
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
|
38 |
+
# (2) To ensure the list of supported extensions is deterministic
|
39 |
+
AUDIO_EXTENSIONS = [
|
40 |
+
".aiff",
|
41 |
+
".au",
|
42 |
+
".avr",
|
43 |
+
".caf",
|
44 |
+
".flac",
|
45 |
+
".htk",
|
46 |
+
".svx",
|
47 |
+
".mat4",
|
48 |
+
".mat5",
|
49 |
+
".mpc2k",
|
50 |
+
".ogg",
|
51 |
+
".paf",
|
52 |
+
".pvf",
|
53 |
+
".raw",
|
54 |
+
".rf64",
|
55 |
+
".sd2",
|
56 |
+
".sds",
|
57 |
+
".ircam",
|
58 |
+
".voc",
|
59 |
+
".w64",
|
60 |
+
".wav",
|
61 |
+
".nist",
|
62 |
+
".wavex",
|
63 |
+
".wve",
|
64 |
+
".xi",
|
65 |
+
".mp3",
|
66 |
+
".opus",
|
67 |
+
]
|
68 |
+
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc
ADDED
Binary file (1.7 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (209 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc
ADDED
Binary file (8.85 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py
ADDED
@@ -0,0 +1,285 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#
|
2 |
+
# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
|
3 |
+
# This file coems from the WebDataset library.
|
4 |
+
# See the LICENSE file for licensing terms (BSD-style).
|
5 |
+
#
|
6 |
+
|
7 |
+
"""
|
8 |
+
Binary tensor encodings for PyTorch and NumPy.
|
9 |
+
|
10 |
+
This defines efficient binary encodings for tensors. The format is 8 byte
|
11 |
+
aligned and can be used directly for computations when transmitted, say,
|
12 |
+
via RDMA. The format is supported by WebDataset with the `.ten` filename
|
13 |
+
extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
|
14 |
+
for fast tensor storage with LMDB and in disk files (which can be memory
|
15 |
+
mapped)
|
16 |
+
|
17 |
+
Data is encoded as a series of chunks:
|
18 |
+
|
19 |
+
- magic number (int64)
|
20 |
+
- length in bytes (int64)
|
21 |
+
- bytes (multiple of 64 bytes long)
|
22 |
+
|
23 |
+
Arrays are a header chunk followed by a data chunk.
|
24 |
+
Header chunks have the following structure:
|
25 |
+
|
26 |
+
- dtype (int64)
|
27 |
+
- 8 byte array name
|
28 |
+
- ndim (int64)
|
29 |
+
- dim[0]
|
30 |
+
- dim[1]
|
31 |
+
- ...
|
32 |
+
"""
|
33 |
+
|
34 |
+
import struct
|
35 |
+
import sys
|
36 |
+
|
37 |
+
import numpy as np
|
38 |
+
|
39 |
+
|
40 |
+
def bytelen(a):
|
41 |
+
"""Determine the length of a in bytes."""
|
42 |
+
if hasattr(a, "nbytes"):
|
43 |
+
return a.nbytes
|
44 |
+
elif isinstance(a, (bytearray, bytes)):
|
45 |
+
return len(a)
|
46 |
+
else:
|
47 |
+
raise ValueError(a, "cannot determine nbytes")
|
48 |
+
|
49 |
+
|
50 |
+
def bytedata(a):
|
51 |
+
"""Return a the raw data corresponding to a."""
|
52 |
+
if isinstance(a, (bytearray, bytes, memoryview)):
|
53 |
+
return a
|
54 |
+
elif hasattr(a, "data"):
|
55 |
+
return a.data
|
56 |
+
else:
|
57 |
+
raise ValueError(a, "cannot return bytedata")
|
58 |
+
|
59 |
+
|
60 |
+
# tables for converting between long/short NumPy dtypes
|
61 |
+
|
62 |
+
long_to_short = """
|
63 |
+
float16 f2
|
64 |
+
float32 f4
|
65 |
+
float64 f8
|
66 |
+
int8 i1
|
67 |
+
int16 i2
|
68 |
+
int32 i4
|
69 |
+
int64 i8
|
70 |
+
uint8 u1
|
71 |
+
uint16 u2
|
72 |
+
unit32 u4
|
73 |
+
uint64 u8
|
74 |
+
""".strip()
|
75 |
+
long_to_short = [x.split() for x in long_to_short.split("\n")]
|
76 |
+
long_to_short = {x[0]: x[1] for x in long_to_short}
|
77 |
+
short_to_long = {v: k for k, v in long_to_short.items()}
|
78 |
+
|
79 |
+
|
80 |
+
def check_acceptable_input_type(data, allow64):
|
81 |
+
"""Check that the data has an acceptable type for tensor encoding.
|
82 |
+
|
83 |
+
:param data: array
|
84 |
+
:param allow64: allow 64 bit types
|
85 |
+
"""
|
86 |
+
for a in data:
|
87 |
+
if a.dtype.name not in long_to_short:
|
88 |
+
raise ValueError("unsupported dataypte")
|
89 |
+
if not allow64 and a.dtype.name not in ["float64", "int64", "uint64"]:
|
90 |
+
raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
|
91 |
+
|
92 |
+
|
93 |
+
def str64(s):
|
94 |
+
"""Convert a string to an int64."""
|
95 |
+
s = s + "\0" * (8 - len(s))
|
96 |
+
s = s.encode("ascii")
|
97 |
+
return struct.unpack("@q", s)[0]
|
98 |
+
|
99 |
+
|
100 |
+
def unstr64(i):
|
101 |
+
"""Convert an int64 to a string."""
|
102 |
+
b = struct.pack("@q", i)
|
103 |
+
return b.decode("ascii").strip("\0")
|
104 |
+
|
105 |
+
|
106 |
+
def check_infos(data, infos, required_infos=None):
|
107 |
+
"""Verify the info strings."""
|
108 |
+
if required_infos is False or required_infos is None:
|
109 |
+
return data
|
110 |
+
if required_infos is True:
|
111 |
+
return data, infos
|
112 |
+
if not isinstance(required_infos, (tuple, list)):
|
113 |
+
raise ValueError("required_infos must be tuple or list")
|
114 |
+
for required, actual in zip(required_infos, infos):
|
115 |
+
raise ValueError(f"actual info {actual} doesn't match required info {required}")
|
116 |
+
return data
|
117 |
+
|
118 |
+
|
119 |
+
def encode_header(a, info=""):
|
120 |
+
"""Encode an array header as a byte array."""
|
121 |
+
if a.ndim >= 10:
|
122 |
+
raise ValueError("too many dimensions")
|
123 |
+
if a.nbytes != np.prod(a.shape) * a.itemsize:
|
124 |
+
raise ValueError("mismatch between size and shape")
|
125 |
+
if a.dtype.name not in long_to_short:
|
126 |
+
raise ValueError("unsupported array type")
|
127 |
+
header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
|
128 |
+
return bytedata(np.array(header, dtype="i8"))
|
129 |
+
|
130 |
+
|
131 |
+
def decode_header(h):
|
132 |
+
"""Decode a byte array into an array header."""
|
133 |
+
h = np.frombuffer(h, dtype="i8")
|
134 |
+
if unstr64(h[0]) not in short_to_long:
|
135 |
+
raise ValueError("unsupported array type")
|
136 |
+
dtype = np.dtype(short_to_long[unstr64(h[0])])
|
137 |
+
info = unstr64(h[1])
|
138 |
+
rank = int(h[2])
|
139 |
+
shape = tuple(h[3 : 3 + rank])
|
140 |
+
return shape, dtype, info
|
141 |
+
|
142 |
+
|
143 |
+
def encode_list(l, infos=None): # noqa: E741
|
144 |
+
"""Given a list of arrays, encode them into a list of byte arrays."""
|
145 |
+
if infos is None:
|
146 |
+
infos = [""]
|
147 |
+
else:
|
148 |
+
if len(l) != len(infos):
|
149 |
+
raise ValueError(f"length of list {l} must muatch length of infos {infos}")
|
150 |
+
result = []
|
151 |
+
for i, a in enumerate(l):
|
152 |
+
header = encode_header(a, infos[i % len(infos)])
|
153 |
+
result += [header, bytedata(a)]
|
154 |
+
return result
|
155 |
+
|
156 |
+
|
157 |
+
def decode_list(l, infos=False): # noqa: E741
|
158 |
+
"""Given a list of byte arrays, decode them into arrays."""
|
159 |
+
result = []
|
160 |
+
infos0 = []
|
161 |
+
for header, data in zip(l[::2], l[1::2]):
|
162 |
+
shape, dtype, info = decode_header(header)
|
163 |
+
a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
|
164 |
+
result += [a]
|
165 |
+
infos0 += [info]
|
166 |
+
return check_infos(result, infos0, infos)
|
167 |
+
|
168 |
+
|
169 |
+
magic_str = "~TenBin~"
|
170 |
+
magic = str64(magic_str)
|
171 |
+
magic_bytes = unstr64(magic).encode("ascii")
|
172 |
+
|
173 |
+
|
174 |
+
def roundup(n, k=64):
|
175 |
+
"""Round up to the next multiple of 64."""
|
176 |
+
return k * ((n + k - 1) // k)
|
177 |
+
|
178 |
+
|
179 |
+
def encode_chunks(l): # noqa: E741
|
180 |
+
"""Encode a list of chunks into a single byte array, with lengths and magics.."""
|
181 |
+
size = sum(16 + roundup(b.nbytes) for b in l)
|
182 |
+
result = bytearray(size)
|
183 |
+
offset = 0
|
184 |
+
for b in l:
|
185 |
+
result[offset : offset + 8] = magic_bytes
|
186 |
+
offset += 8
|
187 |
+
result[offset : offset + 8] = struct.pack("@q", b.nbytes)
|
188 |
+
offset += 8
|
189 |
+
result[offset : offset + bytelen(b)] = b
|
190 |
+
offset += roundup(bytelen(b))
|
191 |
+
return result
|
192 |
+
|
193 |
+
|
194 |
+
def decode_chunks(buf):
|
195 |
+
"""Decode a byte array into a list of chunks."""
|
196 |
+
result = []
|
197 |
+
offset = 0
|
198 |
+
total = bytelen(buf)
|
199 |
+
while offset < total:
|
200 |
+
if magic_bytes != buf[offset : offset + 8]:
|
201 |
+
raise ValueError("magic bytes mismatch")
|
202 |
+
offset += 8
|
203 |
+
nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
|
204 |
+
offset += 8
|
205 |
+
b = buf[offset : offset + nbytes]
|
206 |
+
offset += roundup(nbytes)
|
207 |
+
result.append(b)
|
208 |
+
return result
|
209 |
+
|
210 |
+
|
211 |
+
def encode_buffer(l, infos=None): # noqa: E741
|
212 |
+
"""Encode a list of arrays into a single byte array."""
|
213 |
+
if not isinstance(l, list):
|
214 |
+
raise ValueError("requires list")
|
215 |
+
return encode_chunks(encode_list(l, infos=infos))
|
216 |
+
|
217 |
+
|
218 |
+
def decode_buffer(buf, infos=False):
|
219 |
+
"""Decode a byte array into a list of arrays."""
|
220 |
+
return decode_list(decode_chunks(buf), infos=infos)
|
221 |
+
|
222 |
+
|
223 |
+
def write_chunk(stream, buf):
|
224 |
+
"""Write a byte chunk to the stream with magics, length, and padding."""
|
225 |
+
nbytes = bytelen(buf)
|
226 |
+
stream.write(magic_bytes)
|
227 |
+
stream.write(struct.pack("@q", nbytes))
|
228 |
+
stream.write(bytedata(buf))
|
229 |
+
padding = roundup(nbytes) - nbytes
|
230 |
+
if padding > 0:
|
231 |
+
stream.write(b"\0" * padding)
|
232 |
+
|
233 |
+
|
234 |
+
def read_chunk(stream):
|
235 |
+
"""Read a byte chunk from a stream with magics, length, and padding."""
|
236 |
+
magic = stream.read(8)
|
237 |
+
if magic == b"":
|
238 |
+
return None
|
239 |
+
if magic != magic_bytes:
|
240 |
+
raise ValueError("magic number does not match")
|
241 |
+
nbytes = stream.read(8)
|
242 |
+
nbytes = struct.unpack("@q", nbytes)[0]
|
243 |
+
if nbytes < 0:
|
244 |
+
raise ValueError("negative nbytes")
|
245 |
+
data = stream.read(nbytes)
|
246 |
+
padding = roundup(nbytes) - nbytes
|
247 |
+
if padding > 0:
|
248 |
+
stream.read(padding)
|
249 |
+
return data
|
250 |
+
|
251 |
+
|
252 |
+
def write(stream, l, infos=None): # noqa: E741
|
253 |
+
"""Write a list of arrays to a stream, with magics, length, and padding."""
|
254 |
+
for chunk in encode_list(l, infos=infos):
|
255 |
+
write_chunk(stream, chunk)
|
256 |
+
|
257 |
+
|
258 |
+
def read(stream, n=sys.maxsize, infos=False):
|
259 |
+
"""Read a list of arrays from a stream, with magics, length, and padding."""
|
260 |
+
chunks = []
|
261 |
+
for _ in range(n):
|
262 |
+
header = read_chunk(stream)
|
263 |
+
if header is None:
|
264 |
+
break
|
265 |
+
data = read_chunk(stream)
|
266 |
+
if data is None:
|
267 |
+
raise ValueError("premature EOF")
|
268 |
+
chunks += [header, data]
|
269 |
+
return decode_list(chunks, infos=infos)
|
270 |
+
|
271 |
+
|
272 |
+
def save(fname, *args, infos=None, nocheck=False):
|
273 |
+
"""Save a list of arrays to a file, with magics, length, and padding."""
|
274 |
+
if not nocheck and not fname.endswith(".ten"):
|
275 |
+
raise ValueError("file name should end in .ten")
|
276 |
+
with open(fname, "wb") as stream:
|
277 |
+
write(stream, args, infos=infos)
|
278 |
+
|
279 |
+
|
280 |
+
def load(fname, infos=False, nocheck=False):
|
281 |
+
"""Read a list of arrays from a file, with magics, length, and padding."""
|
282 |
+
if not nocheck and not fname.endswith(".ten"):
|
283 |
+
raise ValueError("file name should end in .ten")
|
284 |
+
with open(fname, "rb") as stream:
|
285 |
+
return read(stream, infos=infos)
|
llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py
ADDED
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+import io
+import json
+from itertools import islice
+from typing import Any, Callable, Dict, List
+
+import numpy as np
+import pyarrow as pa
+
+import datasets
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class WebDataset(datasets.GeneratorBasedBuilder):
+    DEFAULT_WRITER_BATCH_SIZE = 100
+    IMAGE_EXTENSIONS: List[str]  # definition at the bottom of the script
+    AUDIO_EXTENSIONS: List[str]  # definition at the bottom of the script
+    DECODERS: Dict[str, Callable[[Any], Any]]  # definition at the bottom of the script
+    NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
+
+    @classmethod
+    def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
+        current_example = {}
+        for filename, f in tar_iterator:
+            if "." in filename:
+                example_key, field_name = filename.split(".", 1)
+                if current_example and current_example["__key__"] != example_key:
+                    yield current_example
+                    current_example = {}
+                current_example["__key__"] = example_key
+                current_example["__url__"] = tar_path
+                current_example[field_name.lower()] = f.read()
+                if field_name in cls.DECODERS:
+                    current_example[field_name] = cls.DECODERS[field_name](current_example[field_name])
+        if current_example:
+            yield current_example
+
+    def _info(self) -> datasets.DatasetInfo:
+        return datasets.DatasetInfo()
+
+    def _split_generators(self, dl_manager):
+        """We handle string, list and dicts in datafiles"""
+        # Download the data files
+        if not self.config.data_files:
+            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+        data_files = dl_manager.download(self.config.data_files)
+        if isinstance(data_files, (str, list, tuple)):
+            tar_paths = data_files
+            if isinstance(tar_paths, str):
+                tar_paths = [tar_paths]
+            tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+            splits = [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+                )
+            ]
+        else:
+            splits = []
+            for split_name, tar_paths in data_files.items():
+                if isinstance(tar_paths, str):
+                    tar_paths = [tar_paths]
+                tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+                splits.append(
+                    datasets.SplitGenerator(
+                        name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+                    )
+                )
+        if not self.info.features:
+            # Get one example to get the feature types
+            pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
+            first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
+            if any(example.keys() != first_examples[0].keys() for example in first_examples):
+                raise ValueError(
+                    "The TAR archives of the dataset should be in WebDataset format, "
+                    "but the files in the archive don't share the same prefix or the same types."
+                )
+            pa_tables = [pa.Table.from_pylist([example]) for example in first_examples]
+            if datasets.config.PYARROW_VERSION.major < 14:
+                inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema
+            else:
+                inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
+            features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
+
+            # Set Image types
+            for field_name in first_examples[0]:
+                extension = field_name.rsplit(".", 1)[-1]
+                if extension in self.IMAGE_EXTENSIONS:
+                    features[field_name] = datasets.Image()
+            # Set Audio types
+            for field_name in first_examples[0]:
+                extension = field_name.rsplit(".", 1)[-1]
+                if extension in self.AUDIO_EXTENSIONS:
+                    features[field_name] = datasets.Audio()
+            self.info.features = features
+
+        return splits
+
+    def _generate_examples(self, tar_paths, tar_iterators):
+        image_field_names = [
+            field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
+        ]
+        audio_field_names = [
+            field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
+        ]
+        for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
+            for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
+                for field_name in image_field_names + audio_field_names:
+                    example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]}
+                yield f"{tar_idx}_{example_idx}", example
+
+
+# Obtained with:
+# ```
+# import PIL.Image
+# IMAGE_EXTENSIONS = []
+# PIL.Image.init()
+# for ext, format in PIL.Image.EXTENSION.items():
+#     if format in PIL.Image.OPEN:
+#         IMAGE_EXTENSIONS.append(ext[1:])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+IMAGE_EXTENSIONS = [
+    "blp",
+    "bmp",
+    "dib",
+    "bufr",
+    "cur",
+    "pcx",
+    "dcx",
+    "dds",
+    "ps",
+    "eps",
+    "fit",
+    "fits",
+    "fli",
+    "flc",
+    "ftc",
+    "ftu",
+    "gbr",
+    "gif",
+    "grib",
+    "h5",
+    "hdf",
+    "png",
+    "apng",
+    "jp2",
+    "j2k",
+    "jpc",
+    "jpf",
+    "jpx",
+    "j2c",
+    "icns",
+    "ico",
+    "im",
+    "iim",
+    "tif",
+    "tiff",
+    "jfif",
+    "jpe",
+    "jpg",
+    "jpeg",
+    "mpg",
+    "mpeg",
+    "msp",
+    "pcd",
+    "pxr",
+    "pbm",
+    "pgm",
+    "ppm",
+    "pnm",
+    "psd",
+    "bw",
+    "rgb",
+    "rgba",
+    "sgi",
+    "ras",
+    "tga",
+    "icb",
+    "vda",
+    "vst",
+    "webp",
+    "wmf",
+    "emf",
+    "xbm",
+    "xpm",
+]
+WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+    "aiff",
+    "au",
+    "avr",
+    "caf",
+    "flac",
+    "htk",
+    "svx",
+    "mat4",
+    "mat5",
+    "mpc2k",
+    "ogg",
+    "paf",
+    "pvf",
+    "raw",
+    "rf64",
+    "sd2",
+    "sds",
+    "ircam",
+    "voc",
+    "w64",
+    "wav",
+    "nist",
+    "wavex",
+    "wve",
+    "xi",
+    "mp3",
+    "opus",
+]
+WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS
+
+
+def text_loads(data: bytes):
+    return data.decode("utf-8")
+
+
+def tenbin_loads(data: bytes):
+    from . import _tenbin
+
+    return _tenbin.decode_buffer(data)
+
+
+def msgpack_loads(data: bytes):
+    import msgpack
+
+    return msgpack.unpackb(data)
+
+
+def npy_loads(data: bytes):
+    import numpy.lib.format
+
+    stream = io.BytesIO(data)
+    return numpy.lib.format.read_array(stream, allow_pickle=False)
+
+
+def npz_loads(data: bytes):
+    return np.load(io.BytesIO(data), allow_pickle=False)
+
+
+def cbor_loads(data: bytes):
+    import cbor
+
+    return cbor.loads(data)
+
+
+# Obtained by checking `decoders` in `webdataset.autodecode`
+# and removing unsafe extension decoders.
+# Removed Pickle decoders:
+# - "pyd": lambda data: pickle.loads(data)
+# - "pickle": lambda data: pickle.loads(data)
+# Removed Torch decoders:
+# - "pth": lambda data: torch_loads(data)
+# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False):
+# - "npy": npy_loads,
+# - "npz": lambda data: np.load(io.BytesIO(data)),
+DECODERS = {
+    "txt": text_loads,
+    "text": text_loads,
+    "transcript": text_loads,
+    "cls": int,
+    "cls2": int,
+    "index": int,
+    "inx": int,
+    "id": int,
+    "json": json.loads,
+    "jsn": json.loads,
+    "ten": tenbin_loads,
+    "tb": tenbin_loads,
+    "mp": msgpack_loads,
+    "msg": msgpack_loads,
+    "npy": npy_loads,
+    "npz": npz_loads,
+    "cbor": cbor_loads,
+}
+WebDataset.DECODERS = DECODERS
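This builder backs the packaged `webdataset` loader: examples are grouped by key prefix inside each TAR archive, known extensions are decoded via `DECODERS`, and image/audio fields are typed from the inferred features. A hedged usage sketch, assuming a local `train.tar` laid out in WebDataset style (e.g. `0001.jpg` plus `0001.json` per example):

```python
from datasets import load_dataset

ds = load_dataset("webdataset", data_files={"train": "train.tar"}, split="train")
example = ds[0]
# Column names come from the file extensions; "__key__" and "__url__" are added per example.
print(example["__key__"], list(example.keys()))
```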
llmeval-env/lib/python3.10/site-packages/datasets/utils/__init__.py
ADDED
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ruff: noqa
+
+from . import tqdm as _tqdm  # _tqdm is the module
+from .info_utils import VerificationMode
+from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
+from .version import Version
+from .experimental import experimental
+from .tqdm import (
+    disable_progress_bars,
+    enable_progress_bars,
+    are_progress_bars_disabled,
+    tqdm,
+)
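These re-exports make the progress-bar helpers available directly from `datasets.utils`. A quick sketch:

```python
from datasets.utils import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled

disable_progress_bar()                 # silence tqdm bars globally
assert not is_progress_bar_enabled()
enable_progress_bar()                  # turn them back on
```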
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc
ADDED
Binary file (8.44 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc
ADDED
Binary file (1.44 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc
ADDED
Binary file (51.2 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc
ADDED
Binary file (369 Bytes). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc
ADDED
Binary file (12.3 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc
ADDED
Binary file (3.5 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc
ADDED
Binary file (24.1 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc
ADDED
Binary file (3.17 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc
ADDED
Binary file (17.7 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc
ADDED
Binary file (2.36 kB). View file
llmeval-env/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py
ADDED
@@ -0,0 +1,96 @@
+from typing import Any, Dict, List, Optional, Union
+
+from .. import config
+from ..exceptions import DatasetsError
+from .file_utils import (
+    get_authentication_headers_for_url,
+    http_get,
+)
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class DatasetViewerError(DatasetsError):
+    """Dataset viewer error.
+
+    Raised when trying to use the dataset viewer HTTP API and when trying to access:
+    - a missing dataset, or
+    - a private/gated dataset and the user is not authenticated.
+    - unavailable /parquet or /info responses
+    """
+
+
+def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
+    """
+    Get the dataset exported parquet files
+    Docs: https://huggingface.co/docs/datasets-server/parquet
+    """
+    dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
+    try:
+        parquet_data_files_response = http_get(
+            url=dataset_viewer_parquet_url + dataset,
+            temp_file=None,
+            headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+            timeout=100.0,
+            max_retries=3,
+        )
+        parquet_data_files_response.raise_for_status()
+        if "X-Revision" in parquet_data_files_response.headers:
+            if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
+                parquet_data_files_response_json = parquet_data_files_response.json()
+                if (
+                    parquet_data_files_response_json.get("partial") is False
+                    and not parquet_data_files_response_json.get("pending", True)
+                    and not parquet_data_files_response_json.get("failed", True)
+                    and "parquet_files" in parquet_data_files_response_json
+                ):
+                    return parquet_data_files_response_json["parquet_files"]
+                else:
+                    logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
+            else:
+                logger.debug(
+                    f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
+                )
+    except Exception as e:  # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
+        logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
+    raise DatasetViewerError("No exported Parquet files available.")
+
+
+def get_exported_dataset_infos(
+    dataset: str, revision: str, token: Optional[Union[str, bool]]
+) -> Dict[str, Dict[str, Any]]:
+    """
+    Get the dataset information, can be useful to get e.g. the dataset features.
+    Docs: https://huggingface.co/docs/datasets-server/info
+    """
+    dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
+    try:
+        info_response = http_get(
+            url=dataset_viewer_info_url + dataset,
+            temp_file=None,
+            headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+            timeout=100.0,
+            max_retries=3,
+        )
+        info_response.raise_for_status()
+        if "X-Revision" in info_response.headers:
+            if info_response.headers["X-Revision"] == revision or revision is None:
+                info_response = info_response.json()
+                if (
+                    info_response.get("partial") is False
+                    and not info_response.get("pending", True)
+                    and not info_response.get("failed", True)
+                    and "dataset_info" in info_response
+                ):
+                    return info_response["dataset_info"]
+                else:
+                    logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
+            else:
+                logger.debug(
+                    f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
+                )
+    except Exception as e:  # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
+        logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
+    raise DatasetViewerError("No exported dataset infos available.")
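Both helpers query the dataset viewer API and raise `DatasetViewerError` when no usable export exists for the requested revision. A hedged sketch, with a placeholder dataset name and `revision=None` so any exported revision is accepted:

```python
from datasets.utils._dataset_viewer import DatasetViewerError, get_exported_parquet_files

try:
    parquet_files = get_exported_parquet_files("squad", revision=None, token=None)
    print(len(parquet_files), "exported Parquet files")
except DatasetViewerError:
    print("No Parquet export available for this dataset")
```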
llmeval-env/lib/python3.10/site-packages/datasets/utils/_dill.py
ADDED
@@ -0,0 +1,459 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Extends `dill` to support pickling more types and produce more consistent dumps."""
+
+import os
+import sys
+from io import BytesIO
+from types import CodeType, FunctionType
+
+import dill
+from packaging import version
+
+from .. import config
+
+
+class Pickler(dill.Pickler):
+    dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
+    _legacy_no_dict_keys_sorting = False
+
+    def save(self, obj, save_persistent_id=True):
+        obj_type = type(obj)
+        if obj_type not in self.dispatch:
+            if "regex" in sys.modules:
+                import regex  # type: ignore
+
+                if obj_type is regex.Pattern:
+                    pklregister(obj_type)(_save_regexPattern)
+            if "spacy" in sys.modules:
+                import spacy  # type: ignore
+
+                if issubclass(obj_type, spacy.Language):
+                    pklregister(obj_type)(_save_spacyLanguage)
+            if "tiktoken" in sys.modules:
+                import tiktoken  # type: ignore
+
+                if obj_type is tiktoken.Encoding:
+                    pklregister(obj_type)(_save_tiktokenEncoding)
+            if "torch" in sys.modules:
+                import torch  # type: ignore
+
+                if issubclass(obj_type, torch.Tensor):
+                    pklregister(obj_type)(_save_torchTensor)
+
+                if obj_type is torch.Generator:
+                    pklregister(obj_type)(_save_torchGenerator)
+
+                # Unwrap `torch.compile`-ed modules
+                if issubclass(obj_type, torch.nn.Module):
+                    obj = getattr(obj, "_orig_mod", obj)
+            if "transformers" in sys.modules:
+                import transformers  # type: ignore
+
+                if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
+                    pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
+
+        # Unwrap `torch.compile`-ed functions
+        if obj_type is FunctionType:
+            obj = getattr(obj, "_torchdynamo_orig_callable", obj)
+        dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
+
+    def _batch_setitems(self, items):
+        if self._legacy_no_dict_keys_sorting:
+            return super()._batch_setitems(items)
+        # Ignore the order of keys in a dict
+        try:
+            # Faster, but fails for unorderable elements
+            items = sorted(items)
+        except Exception:  # TypeError, decimal.InvalidOperation, etc.
+            from datasets.fingerprint import Hasher
+
+            items = sorted(items, key=lambda x: Hasher.hash(x[0]))
+        dill.Pickler._batch_setitems(self, items)
+
+    def memoize(self, obj):
+        # Don't memoize strings since two identical strings can have different Python ids
+        if type(obj) is not str:  # noqa: E721
+            dill.Pickler.memoize(self, obj)
+
+
+def pklregister(t):
+    """Register a custom reducer for the type."""
+
+    def proxy(func):
+        Pickler.dispatch[t] = func
+        return func
+
+    return proxy
+
+
+def dump(obj, file):
+    """Pickle an object to a file."""
+    Pickler(file, recurse=True).dump(obj)
+
+
+def dumps(obj):
+    """Pickle an object to a string."""
+    file = BytesIO()
+    dump(obj, file)
+    return file.getvalue()
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+    def log(pickler, msg):
+        dill._dill.log.info(msg)
+
+elif config.DILL_VERSION.release[:3] in [
+    version.parse("0.3.6").release,
+    version.parse("0.3.7").release,
+    version.parse("0.3.8").release,
+]:
+
+    def log(pickler, msg):
+        dill._dill.logger.trace(pickler, msg)
+
+
+@pklregister(set)
+def _save_set(pickler, obj):
+    log(pickler, f"Se: {obj}")
+    try:
+        # Faster, but fails for unorderable elements
+        args = (sorted(obj),)
+    except Exception:  # TypeError, decimal.InvalidOperation, etc.
+        from datasets.fingerprint import Hasher
+
+        args = (sorted(obj, key=Hasher.hash),)
+
+    pickler.save_reduce(set, args, obj=obj)
+    log(pickler, "# Se")
+
+
+def _save_regexPattern(pickler, obj):
+    import regex  # type: ignore
+
+    log(pickler, f"Re: {obj}")
+    args = (obj.pattern, obj.flags)
+    pickler.save_reduce(regex.compile, args, obj=obj)
+    log(pickler, "# Re")
+
+
+def _save_tiktokenEncoding(pickler, obj):
+    import tiktoken  # type: ignore
+
+    log(pickler, f"Enc: {obj}")
+    args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
+    pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
+    log(pickler, "# Enc")
+
+
+def _save_torchTensor(pickler, obj):
+    import torch  # type: ignore
+
+    # `torch.from_numpy` is not picklable in `torch>=1.11.0`
+    def create_torchTensor(np_array):
+        return torch.from_numpy(np_array)
+
+    log(pickler, f"To: {obj}")
+    args = (obj.detach().cpu().numpy(),)
+    pickler.save_reduce(create_torchTensor, args, obj=obj)
+    log(pickler, "# To")
+
+
+def _save_torchGenerator(pickler, obj):
+    import torch  # type: ignore
+
+    def create_torchGenerator(state):
+        generator = torch.Generator()
+        generator.set_state(state)
+        return generator
+
+    log(pickler, f"Ge: {obj}")
+    args = (obj.get_state(),)
+    pickler.save_reduce(create_torchGenerator, args, obj=obj)
+    log(pickler, "# Ge")
+
+
+def _save_spacyLanguage(pickler, obj):
+    import spacy  # type: ignore
+
+    def create_spacyLanguage(config, bytes):
+        lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
+        lang_inst = lang_cls.from_config(config)
+        return lang_inst.from_bytes(bytes)
+
+    log(pickler, f"Sp: {obj}")
+    args = (obj.config, obj.to_bytes())
+    pickler.save_reduce(create_spacyLanguage, args, obj=obj)
+    log(pickler, "# Sp")
+
+
+def _save_transformersPreTrainedTokenizerBase(pickler, obj):
+    log(pickler, f"Tok: {obj}")
+    # Ignore the `cache` attribute
+    state = obj.__dict__
+    if "cache" in state and isinstance(state["cache"], dict):
+        state["cache"] = {}
+    pickler.save_reduce(type(obj), (), state=state, obj=obj)
+    log(pickler, "# Tok")
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+    @pklregister(CodeType)
+    def _save_code(pickler, obj):
+        """
+        From dill._dill.save_code
+        This is a modified version that removes the origin (filename + line no.)
+        of functions created in notebooks or shells for example.
+        """
+        dill._dill.log.info(f"Co: {obj}")
+        # The filename of a function is the .py file where it is defined.
+        # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
+        # Filenames of functions created in ipykernel the filename
+        # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+        #
+        # For the hashing mechanism we ignore where the function has been defined
+        # More specifically:
+        # - we ignore the filename of special functions (filename starts with '<')
+        # - we always ignore the line number
+        # - we only use the base name of the file instead of the whole path,
+        # to be robust in case a script is moved for example.
+        #
+        # Only those two lines are different from the original implementation:
+        co_filename = (
+            ""
+            if obj.co_filename.startswith("<")
+            or (
+                len(obj.co_filename.split(os.path.sep)) > 1
+                and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+            )
+            or obj.co_name == "<lambda>"
+            else os.path.basename(obj.co_filename)
+        )
+        co_firstlineno = 1
+        # The rest is the same as in the original dill implementation
+        if dill._dill.PY3:
+            if hasattr(obj, "co_posonlyargcount"):
+                args = (
+                    obj.co_argcount,
+                    obj.co_posonlyargcount,
+                    obj.co_kwonlyargcount,
+                    obj.co_nlocals,
+                    obj.co_stacksize,
+                    obj.co_flags,
+                    obj.co_code,
+                    obj.co_consts,
+                    obj.co_names,
+                    obj.co_varnames,
+                    co_filename,
+                    obj.co_name,
+                    co_firstlineno,
+                    obj.co_lnotab,
+                    obj.co_freevars,
+                    obj.co_cellvars,
+                )
+            else:
+                args = (
+                    obj.co_argcount,
+                    obj.co_kwonlyargcount,
+                    obj.co_nlocals,
+                    obj.co_stacksize,
+                    obj.co_flags,
+                    obj.co_code,
+                    obj.co_consts,
+                    obj.co_names,
+                    obj.co_varnames,
+                    co_filename,
+                    obj.co_name,
+                    co_firstlineno,
+                    obj.co_lnotab,
+                    obj.co_freevars,
+                    obj.co_cellvars,
+                )
+        else:
+            args = (
+                obj.co_argcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,
+                obj.co_name,
+                co_firstlineno,
+                obj.co_lnotab,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        pickler.save_reduce(CodeType, args, obj=obj)
+        dill._dill.log.info("# Co")
+        return
+
+elif config.DILL_VERSION.release[:3] in [
+    version.parse("0.3.6").release,
+    version.parse("0.3.7").release,
+    version.parse("0.3.8").release,
+]:
+    # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
+    @pklregister(CodeType)
+    def save_code(pickler, obj):
+        dill._dill.logger.trace(pickler, "Co: %s", obj)
+
+        ############################################################################################################
+        # Modification here for huggingface/datasets
+        # The filename of a function is the .py file where it is defined.
+        # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
+        # Filenames of functions created in ipykernel the filename
+        # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+        #
+        # For the hashing mechanism we ignore where the function has been defined
+        # More specifically:
+        # - we ignore the filename of special functions (filename starts with '<')
+        # - we always ignore the line number
+        # - we only use the base name of the file instead of the whole path,
+        # to be robust in case a script is moved for example.
+        #
+        # Only those two lines are different from the original implementation:
+        co_filename = (
+            ""
+            if obj.co_filename.startswith("<")
+            or (
+                len(obj.co_filename.split(os.path.sep)) > 1
+                and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+            )
+            or obj.co_name == "<lambda>"
+            else os.path.basename(obj.co_filename)
+        )
+        co_firstlineno = 1
+        # The rest is the same as in the original dill implementation, except for the replacements:
+        # - obj.co_filename => co_filename
+        # - obj.co_firstlineno => co_firstlineno
+        ############################################################################################################
+
+        if hasattr(obj, "co_endlinetable"):  # python 3.11a (20 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                obj.co_qualname,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_endlinetable,
+                obj.co_columntable,
+                obj.co_exceptiontable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_exceptiontable"):  # python 3.11 (18 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                obj.co_qualname,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_exceptiontable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_linetable"):  # python 3.10 (16 args)
+            args = (
+                obj.co_lnotab,  # for < python 3.10 [not counted in args]
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_linetable,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        elif hasattr(obj, "co_posonlyargcount"):  # python 3.8 (16 args)
+            args = (
+                obj.co_argcount,
+                obj.co_posonlyargcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_lnotab,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+        else:  # python 3.7 (15 args)
+            args = (
+                obj.co_argcount,
+                obj.co_kwonlyargcount,
+                obj.co_nlocals,
+                obj.co_stacksize,
+                obj.co_flags,
+                obj.co_code,
+                obj.co_consts,
+                obj.co_names,
+                obj.co_varnames,
+                co_filename,  # Modification for huggingface/datasets ############################################
+                obj.co_name,
+                co_firstlineno,  # Modification for huggingface/datasets #########################################
+                obj.co_lnotab,
+                obj.co_freevars,
+                obj.co_cellvars,
+            )
+
+        pickler.save_reduce(dill._dill._create_code, args, obj=obj)
+        dill._dill.logger.trace(pickler, "# Co")
+        return
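The `Pickler` above sorts dict items and set elements before pickling, so `dumps` produces identical bytes for equal objects regardless of insertion order, which is what the fingerprinting and caching layer relies on. A minimal sketch:

```python
from datasets.utils._dill import dumps

assert dumps({"a": 1, "b": 2}) == dumps({"b": 2, "a": 1})  # dict key order is ignored
assert dumps({3, 1, 2}) == dumps({1, 2, 3})                # set order is ignored too
```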
llmeval-env/lib/python3.10/site-packages/datasets/utils/_filelock.py
ADDED
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""Utilities to handle file locking in `datasets`."""
+
+import os
+
+from filelock import FileLock as FileLock_
+from filelock import UnixFileLock
+from filelock import __version__ as _filelock_version
+from packaging import version
+
+
+class FileLock(FileLock_):
+    """
+    A `filelock.FileLock` initializer that handles long paths.
+    It also uses the current umask for lock files.
+    """
+
+    MAX_FILENAME_LENGTH = 255
+
+    def __init__(self, lock_file, *args, **kwargs):
+        # The "mode" argument is required if we want to use the current umask in filelock >= 3.10
+        # In previous versions it was already using the current umask.
+        if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
+            umask = os.umask(0o666)
+            os.umask(umask)
+            kwargs["mode"] = 0o666 & ~umask
+        lock_file = self.hash_filename_if_too_long(lock_file)
+        super().__init__(lock_file, *args, **kwargs)
+
+    @classmethod
+    def hash_filename_if_too_long(cls, path: str) -> str:
+        path = os.path.abspath(os.path.expanduser(path))
+        filename = os.path.basename(path)
+        max_filename_length = cls.MAX_FILENAME_LENGTH
+        if issubclass(cls, UnixFileLock):
+            max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
+        if len(filename) > max_filename_length:
+            dirname = os.path.dirname(path)
+            hashed_filename = str(hash(filename))
+            new_filename = (
+                filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
+            )
+            return os.path.join(dirname, new_filename)
+        else:
+            return path
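This `FileLock` subclass shortens over-long lock-file names by hashing them before delegating to `filelock`. A hedged sketch, with a placeholder directory:

```python
from datasets.utils._filelock import FileLock

very_long_name = "x" * 300 + ".lock"       # longer than most filesystems allow
lock = FileLock("/tmp/" + very_long_name)  # the name is hashed and truncated internally
with lock:
    pass  # critical section; the on-disk lock file uses the shortened name
```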