diff --git a/.gitattributes b/.gitattributes
index 624fc84287de42234cd7ac4ac3b23fc7e9d7c066..7097f473cce5b0d635d6af7a27757da9ed62d65e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -62,3 +62,5 @@ llmeval-env/bin/python3 filter=lfs diff=lfs merge=lfs -text
llmeval-env/bin/python filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d310d943ec03fcc346837af336faf10ce4a14cac
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_dataset.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37d33e656ff1d09845123af6c9b9dafc99740549
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9579baeb2c8de6e685ddeeec8a4d7a649ad1d120
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfc3149f8dae5e38acafba49d07d4a8d688753ef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/config.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99f03a8110182797cac8f05e2fd149bf70f9e930
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/distributed.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5b028cbaf05ba49021b2417a6b47ece166b73f7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/fingerprint.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1b2aaa503c811d31e890e9e926df74d9d7bf86c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd50e080306048ebecb361039a3e2731dd815b40
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/keyhash.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..108358e3e6e89686abd555b43869e6d25b56d2bb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/load.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e071ec7a2bcd0068cbafd21467171134e97c442e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/search.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bc4bcd2315d96f1de843877af797d6cd57a75bb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/streaming.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5026555ea001c4049b724ad5fef0ac83ea17589d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/table.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/features/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ae879a2bb7ffbd7f142c97e4558c8f25a51b6a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/features/__init__.py
@@ -0,0 +1,20 @@
+# ruff: noqa
+
+__all__ = [
+ "Audio",
+ "Array2D",
+ "Array3D",
+ "Array4D",
+ "Array5D",
+ "ClassLabel",
+ "Features",
+ "Sequence",
+ "Value",
+ "Image",
+ "Translation",
+ "TranslationVariableLanguages",
+]
+from .audio import Audio
+from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
+from .image import Image
+from .translation import Translation, TranslationVariableLanguages
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34069c8f0973b865d7ea0b7487a8d085d5932280
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..645e427d40bb63bb3f818683c2f450fcc5ed8150
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18f0a6163116d64d217f2ea56493331028afcc0d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..104cb6cb5ee42f6c280e8f7db883de909931e99b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2343202939252da6d9dec9000f647606c0615c69
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/audio.py b/llmeval-env/lib/python3.10/site-packages/datasets/features/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7df47b7a061801196db39eca222c9e4d6f9e599
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/features/audio.py
@@ -0,0 +1,277 @@
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..table import array_cast
+from ..utils.file_utils import xopen, xsplitext
+from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ from .features import FeatureType
+
+
+@dataclass
+class Audio:
+ """Audio [`Feature`] to extract audio data from an audio file.
+
+ Input: The Audio feature accepts as input:
+ - A `str`: Absolute path to the audio file (i.e. random access is allowed).
+ - A `dict` with the keys:
+
+ - `path`: String with the path of the audio file, relative to the archive file.
+ - `bytes`: Bytes content of the audio file.
+
+ This is useful for archived files with sequential access.
+
+ - A `dict` with the keys:
+
+ - `path`: String with the path of the audio file, relative to the archive file.
+ - `array`: Array containing the audio sample
+ - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
+
+ This is useful for archived files with sequential access.
+
+ Args:
+ sampling_rate (`int`, *optional*):
+ Target sampling rate. If `None`, the native sampling rate is used.
+ mono (`bool`, defaults to `True`):
+ Whether to convert the audio signal to mono by averaging samples across
+ channels.
+ decode (`bool`, defaults to `True`):
+ Whether to decode the audio data. If `False`,
+ returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds[0]["audio"]
+ {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
+ 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+ 'sampling_rate': 16000}
+ ```
+ """
+
+ sampling_rate: Optional[int] = None
+ mono: bool = True
+ decode: bool = True
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+ _type: str = field(default="Audio", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value: Union[str, bytes, dict]) -> dict:
+ """Encode example into a format for Arrow.
+
+ Args:
+ value (`str` or `dict`):
+ Data passed as input to Audio feature.
+
+ Returns:
+ `dict`
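+
+ Example (a minimal sketch; the path is a placeholder and `soundfile` must be installed):
+
+ ```py
+ >>> from datasets import Audio
+ >>> # a plain path is stored as-is, without reading the file
+ >>> Audio().encode_example("/path/to/audio.wav")
+ {'bytes': None, 'path': '/path/to/audio.wav'}
+ ```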
+ """
+ try:
+ import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
+ except ImportError as err:
+ raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
+ if isinstance(value, str):
+ return {"bytes": None, "path": value}
+ elif isinstance(value, bytes):
+ return {"bytes": value, "path": None}
+ elif "array" in value:
+ # convert the audio array to wav bytes
+ buffer = BytesIO()
+ sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
+ # we set "bytes": None to not duplicate the data if they're already available locally
+ if value["path"].endswith("pcm"):
+ # "PCM" only has raw audio bytes
+ if value.get("sampling_rate") is None:
+ # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
+ raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
+ if value.get("bytes"):
+ # If the PCM bytes are already provided, use them directly instead of reading the file
+ bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
+ else:
+ bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
+
+ buffer = BytesIO(bytes())
+ sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ else:
+ return {"bytes": None, "path": value.get("path")}
+ elif value.get("bytes") is not None or value.get("path") is not None:
+ # store the audio bytes, and path is used to infer the audio format using the file extension
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
+ else:
+ raise ValueError(
+ f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+ )
+
+ def decode_example(
+ self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
+ ) -> dict:
+ """Decode example audio file into audio data.
+
+ Args:
+ value (`dict`):
+ A dictionary with keys:
+
+ - `path`: String with relative audio file path.
+ - `bytes`: Bytes of the audio file.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode
+ audio files from private repositories on the Hub, you can pass
+ a dictionary repo_id (`str`) -> token (`bool` or `str`)
+
+ Returns:
+ `dict`
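+
+ Example (a minimal sketch; decoding needs `librosa`, `soundfile` and a real audio file in place of the placeholder path):
+
+ ```py
+ >>> from datasets import Audio
+ >>> audio = Audio(sampling_rate=16_000)
+ >>> decoded = audio.decode_example({"path": "path/to/audio.wav", "bytes": None})
+ >>> sorted(decoded)
+ ['array', 'path', 'sampling_rate']
+ ```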
+ """
+ if not self.decode:
+ raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
+
+ path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
+ if path is None and file is None:
+ raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
+
+ try:
+ import librosa
+ import soundfile as sf
+ except ImportError as err:
+ raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
+
+ audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
+ if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
+ raise RuntimeError(
+ "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+ elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
+ raise RuntimeError(
+ "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+
+ if file is None:
+ token_per_repo_id = token_per_repo_id or {}
+ source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+ )
+ try:
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
+ token = token_per_repo_id[repo_id]
+ except (ValueError, KeyError):
+ token = None
+
+ download_config = DownloadConfig(token=token)
+ with xopen(path, "rb", download_config=download_config) as f:
+ array, sampling_rate = sf.read(f)
+
+ else:
+ array, sampling_rate = sf.read(file)
+
+ array = array.T
+ if self.mono:
+ array = librosa.to_mono(array)
+ if self.sampling_rate and self.sampling_rate != sampling_rate:
+ array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+ sampling_rate = self.sampling_rate
+
+ return {"path": path, "array": array, "sampling_rate": sampling_rate}
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
+ from .features import Value
+
+ if self.decode:
+ raise ValueError("Cannot flatten a decoded Audio feature.")
+ return {
+ "bytes": Value("binary"),
+ "path": Value("string"),
+ }
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
+ """Cast an Arrow array to the Audio arrow storage type.
+ The Arrow types that can be converted to the Audio pyarrow storage type are:
+
+ - `pa.string()` - it must contain the "path" data
+ - `pa.binary()` - it must contain the audio bytes
+ - `pa.struct({"bytes": pa.binary()})`
+ - `pa.struct({"path": pa.string()})`
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+
+ Args:
+ storage (`Union[pa.StringArray, pa.StructArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
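+
+ Example (a minimal sketch casting a string array of placeholder paths):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> from datasets import Audio
+ >>> storage = pa.array(["/path/to/audio.wav", None], type=pa.string())
+ >>> Audio().cast_storage(storage).type == Audio.pa_type
+ True
+ ```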
+ """
+ if pa.types.is_string(storage.type):
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_binary(storage.type):
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
+ storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
+ elif pa.types.is_struct(storage.type):
+ if storage.type.get_field_index("bytes") >= 0:
+ bytes_array = storage.field("bytes")
+ else:
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ if storage.type.get_field_index("path") >= 0:
+ path_array = storage.field("path")
+ else:
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+ return array_cast(storage, self.pa_type)
+
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+ """Embed audio files into the Arrow array.
+
+ Args:
+ storage (`pa.StructArray`):
+ PyArrow array to embed.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+
+ @no_op_if_value_is_null
+ def path_to_bytes(path):
+ with xopen(path, "rb") as f:
+ bytes_ = f.read()
+ return bytes_
+
+ bytes_array = pa.array(
+ [
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+ for x in storage.to_pylist()
+ ],
+ type=pa.binary(),
+ )
+ path_array = pa.array(
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+ type=pa.string(),
+ )
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+ return array_cast(storage, self.pa_type)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/features.py b/llmeval-env/lib/python3.10/site-packages/datasets/features/features.py
new file mode 100644
index 0000000000000000000000000000000000000000..893c9f9a1b0b0722471345dc2f3c68f537dfb554
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/features/features.py
@@ -0,0 +1,2202 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""This module handles feature definitions in datasets and some utilities to display the table type."""
+
+import copy
+import json
+import re
+import sys
+from collections.abc import Iterable, Mapping
+from collections.abc import Sequence as SequenceABC
+from dataclasses import InitVar, dataclass, field, fields
+from functools import reduce, wraps
+from operator import mul
+from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
+from typing import Sequence as Sequence_
+
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
+from pandas.api.extensions import ExtensionArray as PandasExtensionArray
+from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
+
+from .. import config
+from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
+from ..table import array_cast
+from ..utils import experimental, logging
+from ..utils.py_utils import asdict, first_non_null_value, zip_dict
+from .audio import Audio
+from .image import Image, encode_pil_image
+from .translation import Translation, TranslationVariableLanguages
+
+
+logger = logging.get_logger(__name__)
+
+
+def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
+ """
+ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
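+
+ Example (a minimal sketch):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> _arrow_to_datasets_dtype(pa.int32())
+ 'int32'
+ >>> _arrow_to_datasets_dtype(pa.timestamp("s", tz="UTC"))
+ 'timestamp[s, tz=UTC]'
+ ```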
+ """
+ if pyarrow.types.is_null(arrow_type):
+ return "null"
+ elif pyarrow.types.is_boolean(arrow_type):
+ return "bool"
+ elif pyarrow.types.is_int8(arrow_type):
+ return "int8"
+ elif pyarrow.types.is_int16(arrow_type):
+ return "int16"
+ elif pyarrow.types.is_int32(arrow_type):
+ return "int32"
+ elif pyarrow.types.is_int64(arrow_type):
+ return "int64"
+ elif pyarrow.types.is_uint8(arrow_type):
+ return "uint8"
+ elif pyarrow.types.is_uint16(arrow_type):
+ return "uint16"
+ elif pyarrow.types.is_uint32(arrow_type):
+ return "uint32"
+ elif pyarrow.types.is_uint64(arrow_type):
+ return "uint64"
+ elif pyarrow.types.is_float16(arrow_type):
+ return "float16" # pyarrow dtype is "halffloat"
+ elif pyarrow.types.is_float32(arrow_type):
+ return "float32" # pyarrow dtype is "float"
+ elif pyarrow.types.is_float64(arrow_type):
+ return "float64" # pyarrow dtype is "double"
+ elif pyarrow.types.is_time32(arrow_type):
+ return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
+ elif pyarrow.types.is_time64(arrow_type):
+ return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
+ elif pyarrow.types.is_timestamp(arrow_type):
+ if arrow_type.tz is None:
+ return f"timestamp[{arrow_type.unit}]"
+ elif arrow_type.tz:
+ return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
+ else:
+ raise ValueError(f"Unexpected timestamp object {arrow_type}.")
+ elif pyarrow.types.is_date32(arrow_type):
+ return "date32" # pyarrow dtype is "date32[day]"
+ elif pyarrow.types.is_date64(arrow_type):
+ return "date64" # pyarrow dtype is "date64[ms]"
+ elif pyarrow.types.is_duration(arrow_type):
+ return f"duration[{arrow_type.unit}]"
+ elif pyarrow.types.is_decimal128(arrow_type):
+ return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
+ elif pyarrow.types.is_decimal256(arrow_type):
+ return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
+ elif pyarrow.types.is_binary(arrow_type):
+ return "binary"
+ elif pyarrow.types.is_large_binary(arrow_type):
+ return "large_binary"
+ elif pyarrow.types.is_string(arrow_type):
+ return "string"
+ elif pyarrow.types.is_large_string(arrow_type):
+ return "large_string"
+ else:
+ raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
+
+
+def string_to_arrow(datasets_dtype: str) -> pa.DataType:
+ """
+ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
+
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
+
+ This is necessary because the datasets.Value() primitive type is constructed using a string dtype
+
+ Value(dtype=str)
+
+ But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
+ which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
+ purpose of this function.
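+
+ Example (a minimal sketch of the round trip with pyarrow types):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> string_to_arrow("int64") == pa.int64()
+ True
+ >>> string_to_arrow("timestamp[us, tz=UTC]") == pa.timestamp("us", tz="UTC")
+ True
+ ```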
+ """
+
+ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
+ msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
+ if examples:
+ examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
+ msg += f"\nValid examples include: {examples}."
+ if urls:
+ urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
+ msg += f"\nFor more information, see: {urls}."
+ return msg
+
+ if datasets_dtype in pa.__dict__:
+ return pa.__dict__[datasets_dtype]()
+
+ if (datasets_dtype + "_") in pa.__dict__:
+ return pa.__dict__[datasets_dtype + "_"]()
+
+ timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
+ if timestamp_matches:
+ timestamp_internals = timestamp_matches.group(1)
+ internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
+ if timestamp_internals in ["s", "ms", "us", "ns"]:
+ return pa.timestamp(timestamp_internals)
+ elif internals_matches:
+ return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "timestamp",
+ examples=["timestamp[us]", "timestamp[us, tz=America/New_York]"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
+ )
+ )
+
+ duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
+ if duration_matches:
+ duration_internals = duration_matches.group(1)
+ if duration_internals in ["s", "ms", "us", "ns"]:
+ return pa.duration(duration_internals)
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "duration",
+ examples=["duration[s]", "duration[us]"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
+ )
+ )
+
+ time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
+ if time_matches:
+ time_internals_bits = time_matches.group(1)
+ if time_internals_bits == "32":
+ time_internals_unit = time_matches.group(2)
+ if time_internals_unit in ["s", "ms"]:
+ return pa.time32(time_internals_unit)
+ else:
+ raise ValueError(
+ f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
+ )
+ elif time_internals_bits == "64":
+ time_internals_unit = time_matches.group(2)
+ if time_internals_unit in ["us", "ns"]:
+ return pa.time64(time_internals_unit)
+ else:
+ raise ValueError(
+ f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
+ )
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "time",
+ examples=["time32[s]", "time64[us]"],
+ urls=[
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
+ ],
+ )
+ )
+
+ decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
+ if decimal_matches:
+ decimal_internals_bits = decimal_matches.group(1)
+ if decimal_internals_bits == "128":
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
+ if decimal_internals_precision_and_scale:
+ precision = decimal_internals_precision_and_scale.group(1)
+ scale = decimal_internals_precision_and_scale.group(2)
+ return pa.decimal128(int(precision), int(scale))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal128",
+ examples=["decimal128(10, 2)", "decimal128(4, -2)"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
+ )
+ )
+ elif decimal_internals_bits == "256":
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
+ if decimal_internals_precision_and_scale:
+ precision = decimal_internals_precision_and_scale.group(1)
+ scale = decimal_internals_precision_and_scale.group(2)
+ return pa.decimal256(int(precision), int(scale))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal256",
+ examples=["decimal256(30, 2)", "decimal256(38, -4)"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
+ )
+ )
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal",
+ examples=["decimal128(12, 3)", "decimal256(40, 6)"],
+ urls=[
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
+ ],
+ )
+ )
+
+ raise ValueError(
+ f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
+ f"Please make sure to use a correct data type, see: "
+ f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
+ )
+
+
+def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
+ """
+ Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
+ It works recursively.
+
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
+ If the first element needs to be cast, then all the elements of the list are cast; otherwise they stay the same.
+ This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
+
+ Args:
+ obj: the object (nested struct) to cast.
+ only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
+ Indeed, Arrow only supports converting 1-dimensional array values.
+ optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
+ and, if it doesn't, not checking the rest of the list elements.
+
+ Returns:
+ casted_obj: the casted object
+ has_changed (bool): True if the object has been changed, False if it is identical
+ """
+
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
+ import PIL.Image
+
+ if isinstance(obj, np.ndarray):
+ if obj.ndim == 0:
+ return obj[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj, False
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj
+ ],
+ True,
+ )
+ elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
+ if obj.ndim == 0:
+ return obj.detach().cpu().numpy()[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj.detach().cpu().numpy(), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj.detach().cpu().numpy()
+ ],
+ True,
+ )
+ elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
+ if obj.ndim == 0:
+ return obj.numpy()[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj.numpy(), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj.numpy()
+ ],
+ True,
+ )
+ elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
+ if obj.ndim == 0:
+ return np.asarray(obj)[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return np.asarray(obj), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in np.asarray(obj)
+ ],
+ True,
+ )
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
+ return encode_pil_image(obj), True
+ elif isinstance(obj, pd.Series):
+ return (
+ _cast_to_python_objects(
+ obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0],
+ True,
+ )
+ elif isinstance(obj, pd.DataFrame):
+ return (
+ {
+ key: _cast_to_python_objects(
+ value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for key, value in obj.to_dict("series").items()
+ },
+ True,
+ )
+ elif isinstance(obj, pd.Timestamp):
+ return obj.to_pydatetime(), True
+ elif isinstance(obj, pd.Timedelta):
+ return obj.to_pytimedelta(), True
+ elif isinstance(obj, Mapping):
+ has_changed = not isinstance(obj, dict)
+ output = {}
+ for k, v in obj.items():
+ casted_v, has_changed_v = _cast_to_python_objects(
+ v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )
+ has_changed |= has_changed_v
+ output[k] = casted_v
+ return output if has_changed else obj, has_changed
+ elif hasattr(obj, "__array__"):
+ return (
+ _cast_to_python_objects(
+ obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0],
+ True,
+ )
+ elif isinstance(obj, (list, tuple)):
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt):
+ break
+ casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
+ first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )
+ if has_changed_first_elmt or not optimize_list_casting:
+ return (
+ [
+ _cast_to_python_objects(
+ elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for elmt in obj
+ ],
+ True,
+ )
+ else:
+ if isinstance(obj, (list, tuple)):
+ return obj, False
+ else:
+ return list(obj), True
+ else:
+ return obj, False
+ else:
+ return obj, False
+
+
+def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
+ """
+ Cast numpy/pytorch/tensorflow/pandas objects to python lists.
+ It works recursively.
+
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
+ If the first element needs to be cast, then all the elements of the list are cast; otherwise they stay the same.
+ This trick makes it possible to cast objects that contain tokenizer outputs without iterating over every single token, for example.
+
+ Args:
+ obj: the object (nested struct) to cast
+ only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
+ Indeed, Arrow only supports converting 1-dimensional array values.
+ optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
+ and, if it doesn't, not checking the rest of the list elements.
+
+ Returns:
+ casted_obj: the casted object
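+
+ Example (a minimal sketch with pandas and numpy inputs):
+
+ ```py
+ >>> import numpy as np
+ >>> import pandas as pd
+ >>> from datasets.features.features import cast_to_python_objects
+ >>> cast_to_python_objects(pd.Series([1, 2, 3]))
+ [1, 2, 3]
+ >>> cast_to_python_objects({"a": np.array([1, 2, 3])})
+ {'a': array([1, 2, 3])}
+ ```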
+ """
+ return _cast_to_python_objects(
+ obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+
+
+@dataclass
+class Value:
+ """
+ The `Value` dtypes are as follows:
+
+ - `null`
+ - `bool`
+ - `int8`
+ - `int16`
+ - `int32`
+ - `int64`
+ - `uint8`
+ - `uint16`
+ - `uint32`
+ - `uint64`
+ - `float16`
+ - `float32` (alias float)
+ - `float64` (alias double)
+ - `time32[(s|ms)]`
+ - `time64[(us|ns)]`
+ - `timestamp[(s|ms|us|ns)]`
+ - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
+ - `date32`
+ - `date64`
+ - `duration[(s|ms|us|ns)]`
+ - `decimal128(precision, scale)`
+ - `decimal256(precision, scale)`
+ - `binary`
+ - `large_binary`
+ - `string`
+ - `large_string`
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'stars': Value(dtype='int32')})
+ >>> features
+ {'stars': Value(dtype='int32', id=None)}
+ ```
+ """
+
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Value", init=False, repr=False)
+
+ def __post_init__(self):
+ if self.dtype == "double": # fix inferred type
+ self.dtype = "float64"
+ if self.dtype == "float": # fix inferred type
+ self.dtype = "float32"
+ self.pa_type = string_to_arrow(self.dtype)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value):
+ if pa.types.is_boolean(self.pa_type):
+ return bool(value)
+ elif pa.types.is_integer(self.pa_type):
+ return int(value)
+ elif pa.types.is_floating(self.pa_type):
+ return float(value)
+ elif pa.types.is_string(self.pa_type):
+ return str(value)
+ else:
+ return value
+
+
+class _ArrayXD:
+ def __post_init__(self):
+ self.shape = tuple(self.shape)
+
+ def __call__(self):
+ pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
+ return pa_type
+
+ def encode_example(self, value):
+ return value
+
+
+@dataclass
+class Array2D(_ArrayXD):
+ """Create a two-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array2D", init=False, repr=False)
+
+
+@dataclass
+class Array3D(_ArrayXD):
+ """Create a three-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array3D", init=False, repr=False)
+
+
+@dataclass
+class Array4D(_ArrayXD):
+ """Create a four-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array4D", init=False, repr=False)
+
+
+@dataclass
+class Array5D(_ArrayXD):
+ """Create a five-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array5D", init=False, repr=False)
+
+
+class _ArrayXDExtensionType(pa.ExtensionType):
+ ndims: Optional[int] = None
+
+ def __init__(self, shape: tuple, dtype: str):
+ if self.ndims is None or self.ndims <= 1:
+ raise ValueError("You must instantiate an array type with a value for dim that is > 1")
+ if len(shape) != self.ndims:
+ raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
+ for dim in range(1, self.ndims):
+ if shape[dim] is None:
+ raise ValueError(f"Only the first dimension can have a dynamic size. Got: {shape}")
+ self.shape = tuple(shape)
+ self.value_type = dtype
+ self.storage_dtype = self._generate_dtype(self.value_type)
+ pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
+
+ def __arrow_ext_serialize__(self):
+ return json.dumps((self.shape, self.value_type)).encode()
+
+ @classmethod
+ def __arrow_ext_deserialize__(cls, storage_type, serialized):
+ args = json.loads(serialized)
+ return cls(*args)
+
+ # This was added to pa.ExtensionType in pyarrow >= 13.0.0
+ def __reduce__(self):
+ return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
+
+ def __hash__(self):
+ return hash((self.__class__, self.shape, self.value_type))
+
+ def __arrow_ext_class__(self):
+ return ArrayExtensionArray
+
+ def _generate_dtype(self, dtype):
+ dtype = string_to_arrow(dtype)
+ for d in reversed(self.shape):
+ dtype = pa.list_(dtype)
+ # Don't specify the size of the list, since fixed length list arrays have issues
+ # being validated after slicing in pyarrow 0.17.1
+ return dtype
+
+ def to_pandas_dtype(self):
+ return PandasArrayExtensionDtype(self.value_type)
+
+
+class Array2DExtensionType(_ArrayXDExtensionType):
+ ndims = 2
+
+
+class Array3DExtensionType(_ArrayXDExtensionType):
+ ndims = 3
+
+
+class Array4DExtensionType(_ArrayXDExtensionType):
+ ndims = 4
+
+
+class Array5DExtensionType(_ArrayXDExtensionType):
+ ndims = 5
+
+
+# Register the extension types for deserialization
+pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
+pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
+pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
+pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
+
+
+def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
+ """
+ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
+ This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
+
+ # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
+ # primitive types are types for which the physical representation in arrow and in numpy is the same
+ # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
+ # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
+ # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
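+
+ Example (a minimal sketch):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> _is_zero_copy_only(pa.int32())
+ True
+ >>> _is_zero_copy_only(pa.bool_())
+ False
+ >>> _is_zero_copy_only(pa.list_(pa.float64()), unnest=True)
+ True
+ ```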
+ """
+
+ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
+ if pa.types.is_list(pa_type):
+ return _unnest_pa_type(pa_type.value_type)
+ return pa_type
+
+ if unnest:
+ pa_type = _unnest_pa_type(pa_type)
+ return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
+
+
+class ArrayExtensionArray(pa.ExtensionArray):
+ def __array__(self):
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
+ return self.to_numpy(zero_copy_only=zero_copy_only)
+
+ def __getitem__(self, i):
+ return self.storage[i]
+
+ def to_numpy(self, zero_copy_only=True):
+ storage: pa.ListArray = self.storage
+ null_mask = storage.is_null().to_numpy(zero_copy_only=False)
+
+ if self.type.shape[0] is not None:
+ size = 1
+ null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
+
+ for i in range(self.type.ndims):
+ size *= self.type.shape[i]
+ storage = storage.flatten()
+ numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
+ numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
+
+ if len(null_indices):
+ numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
+
+ else:
+ shape = self.type.shape
+ ndims = self.type.ndims
+ arrays = []
+ first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
+ for i, is_null in enumerate(null_mask):
+ if is_null:
+ arrays.append(np.nan)
+ else:
+ storage_el = storage[i : i + 1]
+ first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
+ # flatten storage
+ for _ in range(ndims):
+ storage_el = storage_el.flatten()
+
+ numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
+ arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
+
+ if len(np.unique(np.diff(first_dim_offsets))) > 1:
+ # ragged
+ numpy_arr = np.empty(len(arrays), dtype=object)
+ numpy_arr[:] = arrays
+ else:
+ numpy_arr = np.array(arrays)
+
+ return numpy_arr
+
+ def to_pylist(self):
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
+ numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
+ if self.type.shape[0] is None and numpy_arr.dtype == object:
+ return [arr.tolist() for arr in numpy_arr.tolist()]
+ else:
+ return numpy_arr.tolist()
+
+
+class PandasArrayExtensionDtype(PandasExtensionDtype):
+ _metadata = "value_type"
+
+ def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
+ self._value_type = value_type
+
+ def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
+ if isinstance(array, pa.ChunkedArray):
+ array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
+ zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
+ numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
+ return PandasArrayExtensionArray(numpy_arr)
+
+ @classmethod
+ def construct_array_type(cls):
+ return PandasArrayExtensionArray
+
+ @property
+ def type(self) -> type:
+ return np.ndarray
+
+ @property
+ def kind(self) -> str:
+ return "O"
+
+ @property
+ def name(self) -> str:
+ return f"array[{self.value_type}]"
+
+ @property
+ def value_type(self) -> np.dtype:
+ return self._value_type
+
+
+class PandasArrayExtensionArray(PandasExtensionArray):
+ def __init__(self, data: np.ndarray, copy: bool = False):
+ self._data = data if not copy else np.array(data)
+ self._dtype = PandasArrayExtensionDtype(data.dtype)
+
+ def __array__(self, dtype=None):
+ """
+ Convert to NumPy Array.
+ Note that Pandas expects a 1D array when dtype is set to object.
+ But for other dtypes, the returned shape is the same as the one of ``data``.
+
+ More info about pandas 1D requirement for PandasExtensionArray here:
+ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
+
+ """
+ if dtype == object:
+ out = np.empty(len(self._data), dtype=object)
+ for i in range(len(self._data)):
+ out[i] = self._data[i]
+ return out
+ if dtype is None:
+ return self._data
+ else:
+ return self._data.astype(dtype)
+
+ def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
+ return PandasArrayExtensionArray(self._data, copy=True)
+
+ @classmethod
+ def _from_sequence(
+ cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
+ ) -> "PandasArrayExtensionArray":
+ if len(scalars) > 1 and all(
+ isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
+ ):
+ data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
+ else:
+ data = np.empty(len(scalars), dtype=object)
+ data[:] = scalars
+ return cls(data, copy=copy)
+
+ @classmethod
+ def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
+ if len(to_concat) > 1 and all(
+ va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
+ for va in to_concat
+ ):
+ data = np.vstack([va._data for va in to_concat])
+ else:
+ data = np.empty(len(to_concat), dtype=object)
+ data[:] = [va._data for va in to_concat]
+ return cls(data, copy=False)
+
+ @property
+ def dtype(self) -> PandasArrayExtensionDtype:
+ return self._dtype
+
+ @property
+ def nbytes(self) -> int:
+ return self._data.nbytes
+
+ def isna(self) -> np.ndarray:
+ return np.array([pd.isna(arr).any() for arr in self._data])
+
+ def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
+ raise NotImplementedError()
+
+ def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
+ if isinstance(item, int):
+ return self._data[item]
+ return PandasArrayExtensionArray(self._data[item], copy=False)
+
+ def take(
+ self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
+ ) -> "PandasArrayExtensionArray":
+ indices: np.ndarray = np.asarray(indices, dtype=int)
+ if allow_fill:
+ fill_value = (
+ self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
+ )
+ mask = indices == -1
+ if (indices < -1).any():
+ raise ValueError("Invalid value in `indices`, must all be >= -1 when `allow_fill` is True")
+ elif len(self) > 0:
+ pass
+ elif not np.all(mask):
+ raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
+ else:
+ data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
+ return PandasArrayExtensionArray(data, copy=False)
+ took = self._data.take(indices, axis=0)
+ if allow_fill and mask.any():
+ took[mask] = [fill_value] * np.sum(mask)
+ return PandasArrayExtensionArray(took, copy=False)
+
+ def __len__(self) -> int:
+ return len(self._data)
+
+ def __eq__(self, other) -> np.ndarray:
+ if not isinstance(other, PandasArrayExtensionArray):
+ raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
+ return (self._data == other._data).all()
+
+
+def pandas_types_mapper(dtype):
+ if isinstance(dtype, _ArrayXDExtensionType):
+ return PandasArrayExtensionDtype(dtype.value_type)
+
+
+@dataclass
+class ClassLabel:
+ """Feature type for integer class labels.
+
+ There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
+
+ * `num_classes`: Create 0 to (num_classes-1) labels.
+ * `names`: List of label strings.
+ * `names_file`: File containing the list of labels.
+
+ Under the hood the labels are stored as integers.
+ You can use negative integers to represent unknown/missing labels.
+
+ Args:
+ num_classes (`int`, *optional*):
+ Number of classes. All labels must be < `num_classes`.
+ names (`list` of `str`, *optional*):
+ String names for the integer classes.
+ The order in which the names are provided is kept.
+ names_file (`str`, *optional*):
+ Path to a file with names for the integer classes, one per line.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
+ >>> features
+ {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
+ ```
+ """
+
+ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ names: List[str] = None
+ names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "int64"
+ pa_type: ClassVar[Any] = pa.int64()
+ _str2int: ClassVar[Dict[str, int]] = None
+ _int2str: ClassVar[Dict[int, int]] = None
+ _type: str = field(default="ClassLabel", init=False, repr=False)
+
+ def __post_init__(self, num_classes, names_file):
+ self.num_classes = num_classes
+ self.names_file = names_file
+ if self.names_file is not None and self.names is not None:
+ raise ValueError("Please provide either names or names_file but not both.")
+ # Set self.names
+ if self.names is None:
+ if self.names_file is not None:
+ self.names = self._load_names_from_file(self.names_file)
+ elif self.num_classes is not None:
+ self.names = [str(i) for i in range(self.num_classes)]
+ else:
+ raise ValueError("Please provide either num_classes, names or names_file.")
+ elif not isinstance(self.names, SequenceABC):
+ raise TypeError(f"Please provide names as a list, got {type(self.names)}")
+ # Set self.num_classes
+ if self.num_classes is None:
+ self.num_classes = len(self.names)
+ elif self.num_classes != len(self.names):
+ raise ValueError(
+ "ClassLabel number of names does not match the defined num_classes. "
+ f"Got {len(self.names)} names VS {self.num_classes} num_classes"
+ )
+ # Prepare mappings
+ self._int2str = [str(name) for name in self.names]
+ self._str2int = {name: i for i, name in enumerate(self._int2str)}
+ if len(self._int2str) != len(self._str2int):
+ raise ValueError("Some label names are duplicated. Each label name should be unique.")
+
+ def __call__(self):
+ return self.pa_type
+
+ def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
+ """Conversion class name `string` => `integer`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].str2int('neg')
+ 0
+ ```
+ """
+ if not isinstance(values, str) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
+ )
+ return_list = True
+ if isinstance(values, str):
+ values = [values]
+ return_list = False
+
+ output = [self._strval2int(value) for value in values]
+ return output if return_list else output[0]
+
+ def _strval2int(self, value: str) -> int:
+ failed_parse = False
+ value = str(value)
+ # first attempt - raw string value
+ int_value = self._str2int.get(value)
+ if int_value is None:
+ # second attempt - strip whitespace
+ int_value = self._str2int.get(value.strip())
+ if int_value is None:
+ # third attempt - convert str to int
+ try:
+ int_value = int(value)
+ except ValueError:
+ failed_parse = True
+ else:
+ if int_value < -1 or int_value >= self.num_classes:
+ failed_parse = True
+ if failed_parse:
+ raise ValueError(f"Invalid string class label {value}")
+ return int_value
+
+ def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
+ """Conversion `integer` => class name `string`.
+
+ Regarding unknown/missing labels: passing negative integers raises `ValueError`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].int2str(0)
+ 'neg'
+ ```
+ """
+ if not isinstance(values, int) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
+ )
+ return_list = True
+ if isinstance(values, int):
+ values = [values]
+ return_list = False
+
+ for v in values:
+ if not 0 <= v < self.num_classes:
+ raise ValueError(f"Invalid integer class label {v:d}")
+
+ output = [self._int2str[int(v)] for v in values]
+ return output if return_list else output[0]
+
+ def encode_example(self, example_data):
+ if self.num_classes is None:
+ raise ValueError(
+ "Trying to use ClassLabel feature with an undefined number of classes. "
+ "Please set ClassLabel.names or num_classes."
+ )
+
+ # If a string is given, convert to associated integer
+ if isinstance(example_data, str):
+ example_data = self.str2int(example_data)
+
+ # Allowing -1 to mean no label.
+ if not -1 <= example_data < self.num_classes:
+ raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
+ return example_data
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
+ """Cast an Arrow array to the `ClassLabel` arrow storage type.
+ The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
+
+ - `pa.string()`
+ - `pa.int()`
+
+ Args:
+ storage (`Union[pa.StringArray, pa.IntegerArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
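+
+ Example (a minimal sketch casting a string array of label names):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> from datasets import ClassLabel
+ >>> labels = ClassLabel(names=["neg", "pos"])
+ >>> labels.cast_storage(pa.array(["pos", "neg", None])).to_pylist()
+ [1, 0, None]
+ ```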
+ """
+ if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
+ min_max = pc.min_max(storage).as_py()
+ if min_max["max"] is not None and min_max["max"] >= self.num_classes:
+ raise ValueError(
+ f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
+ )
+ elif isinstance(storage, pa.StringArray):
+ storage = pa.array(
+ [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
+ )
+ return array_cast(storage, self.pa_type)
+
+ @staticmethod
+ def _load_names_from_file(names_filepath):
+ with open(names_filepath, encoding="utf-8") as f:
+ return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
+
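+ # Illustrative sketch (comments only, not executed): how `ClassLabel.encode_example`
+ # normalizes labels; `-1` is accepted as the conventional "no label" value.
+ #
+ # >>> label = ClassLabel(names=["neg", "pos"])
+ # >>> label.encode_example("pos")
+ # 1
+ # >>> label.encode_example(-1)  # missing label
+ # -1
+ # >>> label.int2str(1)
+ # 'pos'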
+
+@dataclass
+class Sequence:
+ """Construct a list of feature from a single type or a dict of types.
+ Mostly here for compatiblity with tfds.
+
+ Args:
+ feature:
+ A list of features of a single type or a dictionary of types.
+ length (`int`):
+ Length of the sequence.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features, Sequence, Value, ClassLabel
+ >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
+ >>> features
+ {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
+ ```
+ """
+
+ feature: Any
+ length: int = -1
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "list"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Sequence", init=False, repr=False)
+
+
+FeatureType = Union[
+ dict,
+ list,
+ tuple,
+ Value,
+ ClassLabel,
+ Translation,
+ TranslationVariableLanguages,
+ Sequence,
+ Array2D,
+ Array3D,
+ Array4D,
+ Array5D,
+ Audio,
+ Image,
+]
+
+
+def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
+ """
+ Check if the object is not None.
+ If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
+ """
+ if obj is None:
+ return False
+ elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
+ if len(obj) > 0:
+ if schema is None:
+ pass
+ elif isinstance(schema, (list, tuple)):
+ schema = schema[0]
+ else:
+ schema = schema.feature
+ return _check_non_null_non_empty_recursive(obj[0], schema)
+ else:
+ return False
+ else:
+ return True
+
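+ # Illustrative sketch (comments only): only the first element at each nesting level is
+ # inspected, so the check stays cheap even for long lists.
+ #
+ # >>> _check_non_null_non_empty_recursive(None)
+ # False
+ # >>> _check_non_null_non_empty_recursive([[]])  # empty at the innermost level
+ # False
+ # >>> _check_non_null_non_empty_recursive([[1, 2], []])  # only obj[0] is looked at
+ # True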
+
+def get_nested_type(schema: FeatureType) -> pa.DataType:
+ """
+ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
+ generate_from_arrow_type().
+
+ It performs double-duty as the implementation of Features.type and handles the conversion of
+ datasets.Features -> pa.struct
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, Features):
+ return pa.struct(
+ {key: get_nested_type(schema[key]) for key in schema}
+ ) # Features is subclass of dict, and dict order is deterministic since Python 3.6
+ elif isinstance(schema, dict):
+ return pa.struct(
+ {key: get_nested_type(schema[key]) for key in schema}
+ ) # however don't sort on struct types since the order matters
+ elif isinstance(schema, (list, tuple)):
+ if len(schema) != 1:
+ raise ValueError("When defining list feature, you should just provide one example of the inner type")
+ value_type = get_nested_type(schema[0])
+ return pa.list_(value_type)
+ elif isinstance(schema, Sequence):
+ value_type = get_nested_type(schema.feature)
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
+ return pa.list_(value_type, schema.length)
+
+ # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
+ return schema()
+
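+ # Illustrative sketch (comments only): how nested feature definitions map to Arrow types.
+ # `get_nested_type(Features({"id": Value("int32"), "tokens": [Value("string")]}))` is
+ # equivalent to `pa.struct({"id": pa.int32(), "tokens": pa.list_(pa.string())})`, and
+ # `get_nested_type(Sequence(Value("float32"), length=3))` is the fixed-size
+ # `pa.list_(pa.float32(), 3)`.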
+
+def encode_nested_example(schema, obj, level=0):
+ """Encode a nested example.
+ This is used since some features (in particular ClassLabel) have some logic during encoding.
+
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
+ If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, dict):
+ if level == 0 and obj is None:
+ raise ValueError("Got None but expected a dictionary instead")
+ return (
+ {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
+ if obj is not None
+ else None
+ )
+
+ elif isinstance(schema, (list, tuple)):
+ sub_schema = schema[0]
+ if obj is None:
+ return None
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
+ break
+ if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
+ return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
+ return list(obj)
+ elif isinstance(schema, Sequence):
+ if obj is None:
+ return None
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ # dict of list to fill
+ list_dict = {}
+ if isinstance(obj, (list, tuple)):
+ # obj is a list of dict
+ for k in schema.feature:
+ list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
+ return list_dict
+ else:
+ # obj is a single dict
+ for k in schema.feature:
+ list_dict[k] = (
+ [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
+ if k in obj
+ else None
+ )
+ return list_dict
+ # schema.feature is not a dict
+ if isinstance(obj, str): # don't interpret a string as a list
+ raise ValueError(f"Got a string but expected a list instead: '{obj}'")
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
+ break
+ # be careful when comparing tensors here
+ if (
+ not isinstance(first_elmt, list)
+ or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
+ ):
+ return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
+ return list(obj)
+ # Object with special encoding:
+ # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
+ elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
+ return schema.encode_example(obj) if obj is not None else None
+ # Other objects should be directly convertible to a native Arrow type (like Translation)
+ return obj
+
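+ # Illustrative sketch (comments only): only features with custom encoding (here the
+ # nested ClassLabel) actually transform their values; everything else passes through.
+ #
+ # >>> schema = {"text": Value("string"), "labels": [ClassLabel(names=["neg", "pos"])]}
+ # >>> encode_nested_example(schema, {"text": "great movie", "labels": ["pos", "neg"]})
+ # {'text': 'great movie', 'labels': [1, 0]}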
+
+def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode a nested example.
+ This is used since some features (in particular Audio and Image) have some logic during decoding.
+
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
+ If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, dict):
+ return (
+ {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
+ if obj is not None
+ else None
+ )
+ elif isinstance(schema, (list, tuple)):
+ sub_schema = schema[0]
+ if obj is None:
+ return None
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
+ break
+ if decode_nested_example(sub_schema, first_elmt) != first_elmt:
+ return [decode_nested_example(sub_schema, o) for o in obj]
+ return list(obj)
+ elif isinstance(schema, Sequence):
+ # We allow reversing a list of dicts => dict of lists for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
+ else:
+ return decode_nested_example([schema.feature], obj)
+ # Object with special decoding:
+ elif isinstance(schema, (Audio, Image)):
+ # we pass the token to read and decode files from private repositories in streaming mode
+ if obj is not None and schema.decode:
+ return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
+ return obj
+
+
+_FEATURE_TYPES: Dict[str, FeatureType] = {
+ Value.__name__: Value,
+ ClassLabel.__name__: ClassLabel,
+ Translation.__name__: Translation,
+ TranslationVariableLanguages.__name__: TranslationVariableLanguages,
+ Sequence.__name__: Sequence,
+ Array2D.__name__: Array2D,
+ Array3D.__name__: Array3D,
+ Array4D.__name__: Array4D,
+ Array5D.__name__: Array5D,
+ Audio.__name__: Audio,
+ Image.__name__: Image,
+}
+
+
+@experimental
+def register_feature(
+ feature_cls: type,
+ feature_type: str,
+):
+ """
+ Register a Feature object using a name and class.
+ This function must be used on a Feature class.
+ """
+ if feature_type in _FEATURE_TYPES:
+ logger.warning(
+ f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
+ )
+ _FEATURE_TYPES[feature_type] = feature_cls
+
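+ # Illustrative note (hypothetical `MyFeature` name): a custom feature dataclass exposing a
+ # `_type` field like the built-in features above can be made deserializable by
+ # `generate_from_dict` via `register_feature(MyFeature, "MyFeature")`.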
+
+def generate_from_dict(obj: Any):
+ """Regenerate the nested feature object from a deserialized dict.
+ We use the '_type' fields to get the dataclass name to load.
+
+ generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
+ a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
+ :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
+ mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
+ that :class:`Value` automatically performs.
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(obj, list):
+ return [generate_from_dict(value) for value in obj]
+ # Otherwise we have a dict or a dataclass
+ if "_type" not in obj or isinstance(obj["_type"], dict):
+ return {key: generate_from_dict(value) for key, value in obj.items()}
+ obj = dict(obj)
+ _type = obj.pop("_type")
+ class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
+
+ if class_type is None:
+ raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
+
+ if class_type == Sequence:
+ return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
+
+ field_names = {f.name for f in fields(class_type)}
+ return class_type(**{k: v for k, v in obj.items() if k in field_names})
+
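+ # Illustrative sketch (comments only): deserializing feature dicts.
+ #
+ # >>> generate_from_dict({"dtype": "int32", "_type": "Value"})
+ # Value(dtype='int32', id=None)
+ # >>> generate_from_dict({"feature": {"dtype": "string", "_type": "Value"}, "_type": "Sequence"})
+ # Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)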
+
+def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
+ """
+ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
+ a single field.
+
+ This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
+
+ This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
+ full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
+ """
+ if isinstance(pa_type, pa.StructType):
+ return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
+ elif isinstance(pa_type, pa.FixedSizeListType):
+ return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
+ elif isinstance(pa_type, pa.ListType):
+ feature = generate_from_arrow_type(pa_type.value_type)
+ if isinstance(feature, (dict, tuple, list)):
+ return [feature]
+ return Sequence(feature=feature)
+ elif isinstance(pa_type, _ArrayXDExtensionType):
+ array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
+ return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
+ elif isinstance(pa_type, pa.DictionaryType):
+ raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
+ elif isinstance(pa_type, pa.DataType):
+ return Value(dtype=_arrow_to_datasets_dtype(pa_type))
+ else:
+ raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
+
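+ # Illustrative sketch (comments only): the inverse direction, Arrow type -> feature type.
+ #
+ # >>> generate_from_arrow_type(pa.int32())
+ # Value(dtype='int32', id=None)
+ # >>> generate_from_arrow_type(pa.list_(pa.string()))
+ # Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)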
+
+def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
+ """Build a PyArrow ListArray from a multidimensional NumPy array"""
+ arr = np.array(arr)
+ values = pa.array(arr.flatten(), type=type)
+ for i in range(arr.ndim - 1):
+ n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
+ step_offsets = arr.shape[arr.ndim - i - 1]
+ offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
+ values = pa.ListArray.from_arrays(offsets, values)
+ return values
+
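+ # Illustrative sketch (comments only): a (2, 3) array is stored as one flat values array
+ # plus one offsets array per extra dimension.
+ #
+ # >>> numpy_to_pyarrow_listarray(np.arange(6).reshape(2, 3)).to_pylist()
+ # [[0, 1, 2], [3, 4, 5]]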
+
+def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
+ null_mask = np.array([arr is None for arr in l_arr])
+ null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
+ l_arr = [arr for arr in l_arr if arr is not None]
+ offsets = np.cumsum(
+ [0] + [len(arr) for arr in l_arr], dtype=object
+ ) # convert to dtype object to allow None insertion
+ offsets = np.insert(offsets, null_indices, None)
+ offsets = pa.array(offsets, type=pa.int32())
+ values = pa.concat_arrays(l_arr)
+ return pa.ListArray.from_arrays(offsets, values)
+
+
+def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
+ """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
+ if len(l_arr) > 0:
+ return list_of_pa_arrays_to_pyarrow_listarray(
+ [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
+ )
+ else:
+ return pa.array([], type=type)
+
+
+def contains_any_np_array(data: Any):
+ """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
+
+ Args:
+ data (Any): Data.
+
+ Returns:
+ bool
+ """
+ if isinstance(data, np.ndarray):
+ return True
+ elif isinstance(data, list):
+ return contains_any_np_array(first_non_null_value(data)[1])
+ else:
+ return False
+
+
+def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
+ """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
+
+ Args:
+ data (Union[np.ndarray, List]): Data.
+ type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
+
+ Returns:
+ pa.ListArray
+ """
+ if isinstance(data, np.ndarray):
+ return numpy_to_pyarrow_listarray(data, type=type)
+ elif isinstance(data, list):
+ return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
+
+
+def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
+ """Convert to PyArrow ListArray.
+
+ Args:
+ data (Any): Sequence, iterable, np.ndarray or pd.Series.
+ pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
+
+ Returns:
+ pyarrow.Array
+ """
+ if contains_any_np_array(data):
+ return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
+ else:
+ return pa.array(data, pa_type.storage_dtype)
+
+
+def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
+ """Visit a (possibly nested) feature.
+
+ Args:
+ feature (FeatureType): the feature type to visit
+ func (Callable[[FeatureType], Optional[FeatureType]]): the function applied to each nested feature; returning `None` keeps that feature unchanged
+ Returns:
+ visited feature (FeatureType)
+ """
+ if isinstance(feature, dict):
+ out = func({k: _visit(f, func) for k, f in feature.items()})
+ elif isinstance(feature, (list, tuple)):
+ out = func([_visit(feature[0], func)])
+ elif isinstance(feature, Sequence):
+ out = func(Sequence(_visit(feature.feature, func), length=feature.length))
+ else:
+ out = func(feature)
+ return feature if out is None else out
+
+
+def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
+ """Check if a (possibly nested) feature requires decoding.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
+ of the `decode` attribute of the decodable feature types.
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_decoding(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_decoding(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_decoding(feature.feature)
+ else:
+ return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
+
+
+def require_storage_cast(feature: FeatureType) -> bool:
+ """Check if a (possibly nested) feature requires storage casting.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_storage_cast(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_storage_cast(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_storage_cast(feature.feature)
+ else:
+ return hasattr(feature, "cast_storage")
+
+
+def require_storage_embed(feature: FeatureType) -> bool:
+ """Check if a (possibly nested) feature requires embedding data into storage.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_storage_cast(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_storage_cast(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_storage_cast(feature.feature)
+ else:
+ return hasattr(feature, "embed_storage")
+
+
+def keep_features_dicts_synced(func):
+ """
+ Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object
+ in sync with the main dictionary.
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Features" = args[0]
+ args = args[1:]
+ else:
+ self: "Features" = kwargs.pop("self")
+ out = func(self, *args, **kwargs)
+ assert hasattr(self, "_column_requires_decoding")
+ self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
+ return out
+
+ wrapper._decorator_name_ = "_keep_dicts_synced"
+ return wrapper
+
+
+class Features(dict):
+ """A special dictionary that defines the internal structure of a dataset.
+
+ Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
+ and values are the type of that column.
+
+ `FieldType` can be one of the following:
+ - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
+ - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
+ associated to them and will be stored as integers in the dataset.
+ - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
+ features. It's possible to have nested fields of nested fields in an arbitrary manner.
+ - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
+ `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
+ type hosted in this list.
+
+
+
+ A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
+ lists. This behavior is implemented as a compatibility layer with the TensorFlow Datasets library but may be
+ unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
+ [`~datasets.Sequence`].
+
+
+
+ - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
+ - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
+ to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
+ - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
+ or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
+ - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
+ """
+
+ def __init__(*args, **kwargs):
+ # self not in the signature to allow passing self as a kwarg
+ if not args:
+ raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
+ self, *args = args
+ super(Features, self).__init__(*args, **kwargs)
+ self._column_requires_decoding: Dict[str, bool] = {
+ col: require_decoding(feature) for col, feature in self.items()
+ }
+
+ __setitem__ = keep_features_dicts_synced(dict.__setitem__)
+ __delitem__ = keep_features_dicts_synced(dict.__delitem__)
+ update = keep_features_dicts_synced(dict.update)
+ setdefault = keep_features_dicts_synced(dict.setdefault)
+ pop = keep_features_dicts_synced(dict.pop)
+ popitem = keep_features_dicts_synced(dict.popitem)
+ clear = keep_features_dicts_synced(dict.clear)
+
+ def __reduce__(self):
+ return Features, (dict(self),)
+
+ @property
+ def type(self):
+ """
+ Features field types.
+
+ Returns:
+ :obj:`pyarrow.DataType`
+ """
+ return get_nested_type(self)
+
+ @property
+ def arrow_schema(self):
+ """
+ Features schema.
+
+ Returns:
+ :obj:`pyarrow.Schema`
+ """
+ hf_metadata = {"info": {"features": self.to_dict()}}
+ return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
+
+ @classmethod
+ def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
+ """
+ Construct [`Features`] from Arrow Schema.
+ It also checks the schema metadata for Hugging Face Datasets features.
+ Non-nullable fields are not supported and are set to nullable.
+
+ Args:
+ pa_schema (`pyarrow.Schema`):
+ Arrow Schema.
+
+ Returns:
+ [`Features`]
+ """
+ # try to load features from the arrow schema metadata
+ metadata_features = Features()
+ if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
+ metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
+ if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
+ metadata_features = Features.from_dict(metadata["info"]["features"])
+ metadata_features_schema = metadata_features.arrow_schema
+ obj = {
+ field.name: (
+ metadata_features[field.name]
+ if field.name in metadata_features and metadata_features_schema.field(field.name) == field
+ else generate_from_arrow_type(field.type)
+ )
+ for field in pa_schema
+ }
+ return cls(**obj)
+
+ @classmethod
+ def from_dict(cls, dic) -> "Features":
+ """
+ Construct [`Features`] from dict.
+
+ Regenerate the nested feature object from a deserialized dict.
+ We use the `_type` key to infer the dataclass name of the feature `FieldType`.
+
+ It allows for a convenient constructor syntax
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
+ a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
+ [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
+ any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
+ dtypes that [`Value`] automatically performs.
+
+ Args:
+ dic (`dict[str, Any]`):
+ Python dictionary.
+
+ Returns:
+ `Features`
+
+ Example::
+ >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
+ {'_type': Value(dtype='string', id=None)}
+ """
+ obj = generate_from_dict(dic)
+ return cls(**obj)
+
+ def to_dict(self):
+ return asdict(self)
+
+ def _to_yaml_list(self) -> list:
+ # we compute the YAML list from the dict representation that is used for JSON dump
+ yaml_data = self.to_dict()
+
+ def simplify(feature: dict) -> dict:
+ if not isinstance(feature, dict):
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
+
+ #
+ # sequence: -> sequence: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
+ feature["sequence"] = feature["sequence"]["dtype"]
+
+ #
+ # sequence: -> sequence:
+ # struct: -> - name: foo
+ # - name: foo -> dtype: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
+ feature["sequence"] = feature["sequence"]["struct"]
+
+ #
+ # list: -> list: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
+ feature["list"] = feature["list"]["dtype"]
+
+ #
+ # list: -> list:
+ # struct: -> - name: foo
+ # - name: foo -> dtype: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
+ feature["list"] = feature["list"]["struct"]
+
+ #
+ # class_label: -> class_label:
+ # names: -> names:
+ # - negative -> '0': negative
+ # - positive -> '1': positive
+ #
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
+ # server-side requirement: keys must be strings
+ feature["class_label"]["names"] = {
+ str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
+ }
+ return feature
+
+ def to_yaml_inner(obj: Union[dict, list]) -> dict:
+ if isinstance(obj, dict):
+ _type = obj.pop("_type", None)
+ if _type == "Sequence":
+ _feature = obj.pop("feature")
+ return simplify({"sequence": to_yaml_inner(_feature), **obj})
+ elif _type == "Value":
+ return obj
+ elif _type and not obj:
+ return {"dtype": camelcase_to_snakecase(_type)}
+ elif _type:
+ return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
+ else:
+ return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
+ elif isinstance(obj, list):
+ return simplify({"list": simplify(to_yaml_inner(obj[0]))})
+ elif isinstance(obj, tuple):
+ return to_yaml_inner(list(obj))
+ else:
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
+
+ def to_yaml_types(obj: dict) -> dict:
+ if isinstance(obj, dict):
+ return {k: to_yaml_types(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [to_yaml_types(v) for v in obj]
+ elif isinstance(obj, tuple):
+ return to_yaml_types(list(obj))
+ else:
+ return obj
+
+ return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
+
+ @classmethod
+ def _from_yaml_list(cls, yaml_data: list) -> "Features":
+ yaml_data = copy.deepcopy(yaml_data)
+
+ # we convert the list obtained from YAML data into the dict representation that is used for JSON dump
+
+ def unsimplify(feature: dict) -> dict:
+ if not isinstance(feature, dict):
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
+ #
+ # sequence: int32 -> sequence:
+ # -> dtype: int32
+ #
+ if isinstance(feature.get("sequence"), str):
+ feature["sequence"] = {"dtype": feature["sequence"]}
+ #
+ # list: int32 -> list:
+ # -> dtype: int32
+ #
+ if isinstance(feature.get("list"), str):
+ feature["list"] = {"dtype": feature["list"]}
+
+ #
+ # class_label: -> class_label:
+ # names: -> names:
+ # '0': negative -> - negative
+ # '1': positive -> - positive
+ #
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
+ label_ids = sorted(feature["class_label"]["names"], key=int)
+ if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
+ raise ValueError(
+ f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
+ )
+ feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
+ return feature
+
+ def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
+ if isinstance(obj, dict):
+ if not obj:
+ return {}
+ _type = next(iter(obj))
+ if _type == "sequence":
+ _feature = unsimplify(obj).pop(_type)
+ return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
+ if _type == "list":
+ return [from_yaml_inner(unsimplify(obj)[_type])]
+ if _type == "struct":
+ return from_yaml_inner(obj["struct"])
+ elif _type == "dtype":
+ if isinstance(obj["dtype"], str):
+ # e.g. int32, float64, string, audio, image
+ try:
+ Value(obj["dtype"])
+ return {**obj, "_type": "Value"}
+ except ValueError:
+ # e.g. Audio, Image, ArrayXD
+ return {"_type": snakecase_to_camelcase(obj["dtype"])}
+ else:
+ return from_yaml_inner(obj["dtype"])
+ else:
+ return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
+ elif isinstance(obj, list):
+ names = [_feature.pop("name") for _feature in obj]
+ return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
+ else:
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
+
+ return cls.from_dict(from_yaml_inner(yaml_data))
+
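+ # Illustrative sketch (comments only): the YAML form used in dataset cards. Something like
+ # Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
+ # serializes with `_to_yaml_list()` to (roughly):
+ #
+ # - name: text
+ #   dtype: string
+ # - name: label
+ #   dtype:
+ #     class_label:
+ #       names:
+ #         '0': neg
+ #         '1': pos
+ #
+ # and `_from_yaml_list()` rebuilds the original `Features` from that list.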
+ def encode_example(self, example):
+ """
+ Encode example into a format for Arrow.
+
+ Args:
+ example (`dict[str, Any]`):
+ Data in a Dataset row.
+
+ Returns:
+ `dict[str, Any]`
+ """
+ example = cast_to_python_objects(example)
+ return encode_nested_example(self, example)
+
+ def encode_column(self, column, column_name: str):
+ """
+ Encode column into a format for Arrow.
+
+ Args:
+ column (`list[Any]`):
+ Data in a Dataset column.
+ column_name (`str`):
+ Dataset column name.
+
+ Returns:
+ `list[Any]`
+ """
+ column = cast_to_python_objects(column)
+ return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
+
+ def encode_batch(self, batch):
+ """
+ Encode batch into a format for Arrow.
+
+ Args:
+ batch (`dict[str, list[Any]]`):
+ Data in a Dataset batch.
+
+ Returns:
+ `dict[str, list[Any]]`
+ """
+ encoded_batch = {}
+ if set(batch) != set(self):
+ raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
+ for key, column in batch.items():
+ column = cast_to_python_objects(column)
+ encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
+ return encoded_batch
+
+ def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode example with custom feature decoding.
+
+ Args:
+ example (`dict[str, Any]`):
+ Dataset row data.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode audio or image files from private repositories on the Hub, you can pass
+ a dictionary `repo_id (str) -> token (bool or str)`.
+
+ Returns:
+ `dict[str, Any]`
+ """
+
+ return {
+ column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
+ if self._column_requires_decoding[column_name]
+ else value
+ for column_name, (feature, value) in zip_dict(
+ {key: value for key, value in self.items() if key in example}, example
+ )
+ }
+
+ def decode_column(self, column: list, column_name: str):
+ """Decode column with custom feature decoding.
+
+ Args:
+ column (`list[Any]`):
+ Dataset column data.
+ column_name (`str`):
+ Dataset column name.
+
+ Returns:
+ `list[Any]`
+ """
+ return (
+ [decode_nested_example(self[column_name], value) if value is not None else None for value in column]
+ if self._column_requires_decoding[column_name]
+ else column
+ )
+
+ def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode batch with custom feature decoding.
+
+ Args:
+ batch (`dict[str, list[Any]]`):
+ Dataset batch data.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode audio or image files from private repositories on the Hub, you can pass
+ a dictionary repo_id (str) -> token (bool or str)
+
+ Returns:
+ `dict[str, list[Any]]`
+ """
+ decoded_batch = {}
+ for column_name, column in batch.items():
+ decoded_batch[column_name] = (
+ [
+ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
+ if value is not None
+ else None
+ for value in column
+ ]
+ if self._column_requires_decoding[column_name]
+ else column
+ )
+ return decoded_batch
+
+ def copy(self) -> "Features":
+ """
+ Make a deep copy of [`Features`].
+
+ Returns:
+ [`Features`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> copy_of_features = ds.features.copy()
+ >>> copy_of_features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ return copy.deepcopy(self)
+
+ def reorder_fields_as(self, other: "Features") -> "Features":
+ """
+ Reorder Features fields to match the field order of other [`Features`].
+
+ The order of the fields matters since it is reflected in the underlying arrow data.
+ Re-ordering the fields makes the underlying arrow data types match.
+
+ Args:
+ other ([`Features`]):
+ The other [`Features`] to align with.
+
+ Returns:
+ [`Features`]
+
+ Example::
+
+ >>> from datasets import Features, Sequence, Value
+ >>> # let's say we have two features with a different order of nested fields (for a and b for example)
+ >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
+ >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
+ >>> assert f1.type != f2.type
+ >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
+ >>> f1.reorder_fields_as(f2)
+ {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
+ >>> assert f1.reorder_fields_as(f2).type == f2.type
+ """
+
+ def recursive_reorder(source, target, stack=""):
+ stack_position = " at " + stack[1:] if stack else ""
+ if isinstance(target, Sequence):
+ target = target.feature
+ if isinstance(target, dict):
+ target = {k: [v] for k, v in target.items()}
+ else:
+ target = [target]
+ if isinstance(source, Sequence):
+ source, id_, length = source.feature, source.id, source.length
+ if isinstance(source, dict):
+ source = {k: [v] for k, v in source.items()}
+ reordered = recursive_reorder(source, target, stack)
+ return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
+ else:
+ source = [source]
+ reordered = recursive_reorder(source, target, stack)
+ return Sequence(reordered[0], id=id_, length=length)
+ elif isinstance(source, dict):
+ if not isinstance(target, dict):
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
+ if sorted(source) != sorted(target):
+ message = (
+ f"Keys mismatch: between {source} (source) and {target} (target).\n"
+ f"{source.keys()-target.keys()} are missing from target "
+ f"and {target.keys()-source.keys()} are missing from source" + stack_position
+ )
+ raise ValueError(message)
+ return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
+ elif isinstance(source, list):
+ if not isinstance(target, list):
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
+ if len(source) != len(target):
+ raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
+ return [recursive_reorder(source[i], target[i], stack + ".") for i in range(len(target))]
+ else:
+ return source
+
+ return Features(recursive_reorder(self, other))
+
+ def flatten(self, max_depth=16) -> "Features":
+ """Flatten the features. Every dictionary column is removed and is replaced by
+ all the subfields it contains. The new fields are named by concatenating the
+ name of the original column and the subfield name like this: `<original>.<subfield>`.
+
+ If a column contains nested dictionaries, then all the lower-level subfield names are
+ also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
+
+ Returns:
+ [`Features`]:
+ The flattened features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features.flatten()
+ {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
+ 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ ```
+ """
+ for depth in range(1, max_depth):
+ no_change = True
+ flattened = self.copy()
+ for column_name, subfeature in self.items():
+ if isinstance(subfeature, dict):
+ no_change = False
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
+ del flattened[column_name]
+ elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
+ no_change = False
+ flattened.update(
+ {
+ f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
+ for k, v in subfeature.feature.items()
+ }
+ )
+ del flattened[column_name]
+ elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
+ no_change = False
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
+ del flattened[column_name]
+ self = flattened
+ if no_change:
+ break
+ return self
+
+
+def _align_features(features_list: List[Features]) -> List[Features]:
+ """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
+ name2feature = {}
+ for features in features_list:
+ for k, v in features.items():
+ if k in name2feature and isinstance(v, dict):
+ # Recursively align features.
+ name2feature[k] = _align_features([name2feature[k], v])[0]
+ elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
+ name2feature[k] = v
+
+ return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
+
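+ # Illustrative sketch (comments only): a `Value("null")` placeholder is replaced by the
+ # concrete type found in another features dict, so both end up identical.
+ #
+ # >>> f1 = Features({"id": Value("int64"), "extra": Value("null")})
+ # >>> f2 = Features({"id": Value("int64"), "extra": Value("string")})
+ # >>> _align_features([f1, f2])[0]["extra"]
+ # Value(dtype='string', id=None)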
+
+def _check_if_features_can_be_aligned(features_list: List[Features]):
+ """Check if the dictionaries of features can be aligned.
+
+ Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
+ """
+ name2feature = {}
+ for features in features_list:
+ for k, v in features.items():
+ if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
+ name2feature[k] = v
+
+ for features in features_list:
+ for k, v in features.items():
+ if isinstance(v, dict) and isinstance(name2feature[k], dict):
+ # Deep checks for structure.
+ _check_if_features_can_be_aligned([name2feature[k], v])
+ elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
+ raise ValueError(
+ f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/image.py b/llmeval-env/lib/python3.10/site-packages/datasets/features/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..c63d4d439641a41d592c38a79343195e2beb591e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/features/image.py
@@ -0,0 +1,383 @@
+import os
+import sys
+import warnings
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..table import array_cast
+from ..utils.file_utils import is_local_path, xopen
+from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ import PIL.Image
+
+ from .features import FeatureType
+
+
+_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
+_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
+# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
+_VALID_IMAGE_ARRAY_DTPYES = [
+ np.dtype("|b1"),
+ np.dtype("|u1"),
+ np.dtype("u2"),
+ np.dtype("i2"),
+ np.dtype("u4"),
+ np.dtype("i4"),
+ np.dtype("f4"),
+ np.dtype("f8"),
+]
+
+
+@dataclass
+class Image:
+ """Image [`Feature`] to read image data from an image file.
+
+ Input: The Image feature accepts as input:
+ - A `str`: Absolute path to the image file (i.e. random access is allowed).
+ - A `dict` with the keys:
+
+ - `path`: String with the relative path of the image file inside the archive file.
+ - `bytes`: Bytes of the image file.
+
+ This is useful for archived files with sequential access.
+
+ - An `np.ndarray`: NumPy array representing an image.
+ - A `PIL.Image.Image`: PIL image object.
+
+ Args:
+ mode (`str`, *optional*):
+ The mode to convert the image to. If `None`, the native mode of the image is used.
+ decode (`bool`, defaults to `True`):
+ Whether to decode the image data. If `False`,
+ returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
+
+ Examples:
+
+ ```py
+ >>> from datasets import load_dataset, Image
+ >>> ds = load_dataset("beans", split="train")
+ >>> ds.features["image"]
+ Image(decode=True, id=None)
+ >>> ds[0]["image"]
+ <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x...>
+ >>> ds = ds.cast_column('image', Image(decode=False))
+ >>> ds[0]["image"]
+ {'bytes': None,
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
+ ```
+ """
+
+ mode: Optional[str] = None
+ decode: bool = True
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "PIL.Image.Image"
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+ _type: str = field(default="Image", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
+ """Encode example into a format for Arrow.
+
+ Args:
+ value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
+ Data passed as input to Image feature.
+
+ Returns:
+ `dict` with "path" and "bytes" fields
+ """
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ if isinstance(value, list):
+ value = np.array(value)
+
+ if isinstance(value, str):
+ return {"path": value, "bytes": None}
+ elif isinstance(value, bytes):
+ return {"path": None, "bytes": value}
+ elif isinstance(value, np.ndarray):
+ # convert the image array to PNG/TIFF bytes
+ return encode_np_array(value)
+ elif isinstance(value, PIL.Image.Image):
+ # convert the PIL image to bytes (default format is PNG/TIFF)
+ return encode_pil_image(value)
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
+ # we set "bytes": None to not duplicate the data if they're already available locally
+ return {"bytes": None, "path": value.get("path")}
+ elif value.get("bytes") is not None or value.get("path") is not None:
+ # store the image bytes, and path is used to infer the image format using the file extension
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
+ else:
+ raise ValueError(
+ f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+ )
+
+ def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
+ """Decode example image file into image data.
+
+ Args:
+ value (`str` or `dict`):
+ A string with the absolute image file path, a dictionary with
+ keys:
+
+ - `path`: String with absolute or relative image file path.
+ - `bytes`: The bytes of the image file.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode
+ image files from private repositories on the Hub, you can pass
+ a dictionary repo_id (`str`) -> token (`bool` or `str`).
+
+ Returns:
+ `PIL.Image.Image`
+ """
+ if not self.decode:
+ raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
+
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ import PIL.ImageOps
+ else:
+ raise ImportError("To support decoding images, please install 'Pillow'.")
+
+ if token_per_repo_id is None:
+ token_per_repo_id = {}
+
+ path, bytes_ = value["path"], value["bytes"]
+ if bytes_ is None:
+ if path is None:
+ raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
+ else:
+ if is_local_path(path):
+ image = PIL.Image.open(path)
+ else:
+ source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL
+ if source_url.startswith(config.HF_ENDPOINT)
+ else config.HUB_DATASETS_HFFS_URL
+ )
+ try:
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
+ token = token_per_repo_id.get(repo_id)
+ except ValueError:
+ token = None
+ download_config = DownloadConfig(token=token)
+ with xopen(path, "rb", download_config=download_config) as f:
+ bytes_ = BytesIO(f.read())
+ image = PIL.Image.open(bytes_)
+ else:
+ image = PIL.Image.open(BytesIO(bytes_))
+ image.load() # to avoid "Too many open files" errors
+ if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
+ image = PIL.ImageOps.exif_transpose(image)
+ if self.mode and self.mode != image.mode:
+ image = image.convert(self.mode)
+ return image
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
+ from .features import Value
+
+ return (
+ self
+ if self.decode
+ else {
+ "bytes": Value("binary"),
+ "path": Value("string"),
+ }
+ )
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
+ """Cast an Arrow array to the Image arrow storage type.
+ The Arrow types that can be converted to the Image pyarrow storage type are:
+
+ - `pa.string()` - it must contain the "path" data
+ - `pa.binary()` - it must contain the image bytes
+ - `pa.struct({"bytes": pa.binary()})`
+ - `pa.struct({"path": pa.string()})`
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+ - `pa.list(*)` - it must contain the image array data
+
+ Args:
+ storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.StructArray`: Array in the Image arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+ if pa.types.is_string(storage.type):
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_binary(storage.type):
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_struct(storage.type):
+ if storage.type.get_field_index("bytes") >= 0:
+ bytes_array = storage.field("bytes")
+ else:
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ if storage.type.get_field_index("path") >= 0:
+ path_array = storage.field("path")
+ else:
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_list(storage.type):
+ bytes_array = pa.array(
+ [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
+ type=pa.binary(),
+ )
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays(
+ [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
+ )
+ return array_cast(storage, self.pa_type)
+
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+ """Embed image files into the Arrow array.
+
+ Args:
+ storage (`pa.StructArray`):
+ PyArrow array to embed.
+
+ Returns:
+ `pa.StructArray`: Array in the Image arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+
+ @no_op_if_value_is_null
+ def path_to_bytes(path):
+ with xopen(path, "rb") as f:
+ bytes_ = f.read()
+ return bytes_
+
+ bytes_array = pa.array(
+ [
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+ for x in storage.to_pylist()
+ ],
+ type=pa.binary(),
+ )
+ path_array = pa.array(
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+ type=pa.string(),
+ )
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+ return array_cast(storage, self.pa_type)
+
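+ # Illustrative sketch (comments only, hypothetical file path): the accepted input forms of
+ # `Image.encode_example` and their storage encodings.
+ #
+ # >>> feat = Image()
+ # >>> feat.encode_example("/absolute/path/to/cat.png")  # local path is kept, bytes stay None
+ # {'path': '/absolute/path/to/cat.png', 'bytes': None}
+ # >>> feat.encode_example(b"\x89PNG...")  # raw bytes are stored directly
+ # {'path': None, 'bytes': b'\x89PNG...'}
+ # >>> feat.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # arrays are re-encoded via Pillow
+ # {'path': None, 'bytes': ...}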
+
+def list_image_compression_formats() -> List[str]:
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ global _IMAGE_COMPRESSION_FORMATS
+ if _IMAGE_COMPRESSION_FORMATS is None:
+ PIL.Image.init()
+ _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
+ return _IMAGE_COMPRESSION_FORMATS
+
+
+def image_to_bytes(image: "PIL.Image.Image") -> bytes:
+ """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
+ buffer = BytesIO()
+ if image.format in list_image_compression_formats():
+ format = image.format
+ else:
+ format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
+ image.save(buffer, format=format)
+ return buffer.getvalue()
+
+
+def encode_pil_image(image: "PIL.Image.Image") -> dict:
+ if hasattr(image, "filename") and image.filename != "":
+ return {"path": image.filename, "bytes": None}
+ else:
+ return {"path": None, "bytes": image_to_bytes(image)}
+
+
+def encode_np_array(array: np.ndarray) -> dict:
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ dtype = array.dtype
+ dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
+ dtype_kind = dtype.kind
+ dtype_itemsize = dtype.itemsize
+
+ dest_dtype = None
+
+ # Multi-channel array case (only np.dtype("|u1") is allowed)
+ if array.shape[2:]:
+ if dtype_kind not in ["u", "i"]:
+ raise TypeError(
+ f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
+ )
+ dest_dtype = np.dtype("|u1")
+ if dtype != dest_dtype:
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
+ # Exact match
+ elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
+ dest_dtype = dtype
+ else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
+ while dtype_itemsize >= 1:
+ dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
+ if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
+ dest_dtype = np.dtype(dtype_str)
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
+ break
+ else:
+ dtype_itemsize //= 2
+ if dest_dtype is None:
+ raise TypeError(
+ f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
+ )
+
+ image = PIL.Image.fromarray(array.astype(dest_dtype))
+ return {"path": None, "bytes": image_to_bytes(image)}
+
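+ # Illustrative note: dtype handling in `encode_np_array`. A native int64 grayscale array is
+ # not a Pillow-compatible dtype, so the loop above downcasts it within its kind
+ # (int64 -> int32) and warns before saving; an integer multi-channel (H, W, C) array is
+ # always downcast to uint8. For example, `encode_np_array(np.arange(16, dtype=np.int64).reshape(4, 4))`
+ # returns `{"path": None, "bytes": <TIFF-encoded bytes>}` since mode "I" images fall back to TIFF.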
+
+def objects_to_list_of_image_dicts(
+ objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
+) -> List[dict]:
+ """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ if objs:
+ _, obj = first_non_null_value(objs)
+ if isinstance(obj, str):
+ return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
+ if isinstance(obj, np.ndarray):
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
+ return [obj_to_image_dict_func(obj) for obj in objs]
+ elif isinstance(obj, PIL.Image.Image):
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
+ return [obj_to_image_dict_func(obj) for obj in objs]
+ else:
+ return objs
+ else:
+ return objs
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/features/translation.py b/llmeval-env/lib/python3.10/site-packages/datasets/features/translation.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d3eb1af4bbb15397afe4f1e0a5afd54060fcda3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/features/translation.py
@@ -0,0 +1,129 @@
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
+
+import pyarrow as pa
+
+
+if TYPE_CHECKING:
+ from .features import FeatureType
+
+
+@dataclass
+class Translation:
+ """`FeatureConnector` for translations with fixed languages per example.
+ Here for compatibility with tfds.
+
+ Args:
+ languages (`dict`):
+ A dictionary for each example mapping string language codes to string translations.
+
+ Example:
+
+ ```python
+ >>> # At construction time:
+ >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
+ >>> # During data generation:
+ >>> yield {
+ ... 'en': 'the cat',
+ ... 'fr': 'le chat',
+ ... 'de': 'die katze'
+ ... }
+ ```
+ """
+
+ languages: List[str]
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Translation", init=False, repr=False)
+
+ def __call__(self):
+ return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """Flatten the Translation feature into a dictionary."""
+ from .features import Value
+
+ return {k: Value("string") for k in sorted(self.languages)}
+
+
+@dataclass
+class TranslationVariableLanguages:
+ """`FeatureConnector` for translations with variable languages per example.
+ Here for compatibility with tfds.
+
+ Args:
+ languages (`dict`):
+ A dictionary for each example mapping string language codes to one or more string translations.
+ The languages present may vary from example to example.
+
+ Returns:
+ - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
+ Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
+
+ Example:
+
+ ```python
+ >>> # At construction time:
+ >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
+ >>> # During data generation:
+ >>> yield {
+ ... 'en': 'the cat',
+ ... 'fr': ['le chat', 'la chatte'],
+ ... 'de': 'die katze'
+ ... }
+ >>> # Tensor returned :
+ >>> {
+ ... 'language': ['de', 'en', 'fr', 'fr'],
+ ... 'translation': ['die katze', 'the cat', 'la chatte', 'le chat'],
+ ... }
+ ```
+ """
+
+ languages: Optional[List] = None
+ num_languages: Optional[int] = None
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
+
+ def __post_init__(self):
+ self.languages = sorted(set(self.languages)) if self.languages else None
+ self.num_languages = len(self.languages) if self.languages else None
+
+ def __call__(self):
+ return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
+
+ def encode_example(self, translation_dict):
+ lang_set = set(self.languages)
+ if set(translation_dict) == {"language", "translation"}:
+ return translation_dict
+ elif self.languages and set(translation_dict) - lang_set:
+ raise ValueError(
+ f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
+ )
+
+ # Convert dictionary into tuples, splitting out cases where there are
+ # multiple translations for a single language.
+ translation_tuples = []
+ for lang, text in translation_dict.items():
+ if isinstance(text, str):
+ translation_tuples.append((lang, text))
+ else:
+ translation_tuples.extend([(lang, el) for el in text])
+
+ # Ensure translations are in ascending order by language code.
+ languages, translations = zip(*sorted(translation_tuples))
+
+ return {"language": languages, "translation": translations}
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """Flatten the TranslationVariableLanguages feature into a dictionary."""
+ from .features import Sequence, Value
+
+ return {
+ "language": Sequence(Value("string")),
+ "translation": Sequence(Value("string")),
+ }
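+# Illustrative example (not part of the original file) of `encode_example` on a hypothetical feature:
+# ```
+# feature = TranslationVariableLanguages(languages=["en", "fr"])
+# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
+# # -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}
+# ```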
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5db2346160944090b7c01ef79f300d00275af9fc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81025394358fecfd6768254f8dd6aeb0923bf352
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73aef16d772ea3cd62f5c41e9cb89914110eb475
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd1ecbf12da4a1541115500d38d311346fc161d5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py
@@ -0,0 +1,74 @@
+import itertools
+from dataclasses import dataclass
+from typing import Optional
+
+import pyarrow as pa
+
+import datasets
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class ArrowConfig(datasets.BuilderConfig):
+ """BuilderConfig for Arrow."""
+
+ features: Optional[datasets.Features] = None
+
+
+class Arrow(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = ArrowConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ # Infer features if they are stored in the arrow schema
+ if self.info.features is None:
+ for file in itertools.chain.from_iterable(files):
+ with open(file, "rb") as f:
+ self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+ break
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.info.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ try:
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
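+# Usage sketch (not part of the original file): this packaged module backs `load_dataset("arrow", ...)`.
+# The data file path below is hypothetical.
+# ```
+# import datasets
+#
+# ds = datasets.load_dataset("arrow", data_files={"train": "path/to/data.arrow"}, split="train")
+# ```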
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..51044143039e98af0f9fd7d1ecdf1cab229e58a1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
@@ -0,0 +1,68 @@
+from typing import List
+
+import datasets
+from datasets.tasks import AudioClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+ """Builder Config for AudioFolder."""
+
+ drop_labels: bool = None
+ drop_metadata: bool = None
+
+
+class AudioFolder(folder_based_builder.FolderBasedBuilder):
+ BASE_FEATURE = datasets.Audio
+ BASE_COLUMN_NAME = "audio"
+ BUILDER_CONFIG_CLASS = AudioFolderConfig
+ EXTENSIONS: List[str] # definition at the bottom of the script
+ CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+ ".aiff",
+ ".au",
+ ".avr",
+ ".caf",
+ ".flac",
+ ".htk",
+ ".svx",
+ ".mat4",
+ ".mat5",
+ ".mpc2k",
+ ".ogg",
+ ".paf",
+ ".pvf",
+ ".raw",
+ ".rf64",
+ ".sd2",
+ ".sds",
+ ".ircam",
+ ".voc",
+ ".w64",
+ ".wav",
+ ".nist",
+ ".wavex",
+ ".wve",
+ ".xi",
+ ".mp3",
+ ".opus",
+]
+AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
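+# Usage sketch (not part of the original file): AudioFolder backs `load_dataset("audiofolder", ...)`.
+# The directory below is hypothetical and is expected to contain audio files (optionally with metadata).
+# ```
+# import datasets
+#
+# ds = datasets.load_dataset("audiofolder", data_dir="path/to/folder", split="train")
+# ```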
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..face7aa2ff49d0dd546c3180ae30dac9cf69dd5b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06411478bd0fe34d3c3fe51f02651b1a17ad307d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..458cee224c49b74df157d020e6141f510df885de
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd8c054842e090dc09bdf3d2fee59241a1a928c5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py
@@ -0,0 +1,285 @@
+#
+# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+# This file comes from the WebDataset library.
+# See the LICENSE file for licensing terms (BSD-style).
+#
+
+"""
+Binary tensor encodings for PyTorch and NumPy.
+
+This defines efficient binary encodings for tensors. The format is 8 byte
+aligned and can be used directly for computations when transmitted, say,
+via RDMA. The format is supported by WebDataset with the `.ten` filename
+extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
+for fast tensor storage with LMDB and in disk files (which can be memory
+mapped).
+
+Data is encoded as a series of chunks:
+
+- magic number (int64)
+- length in bytes (int64)
+- bytes (multiple of 64 bytes long)
+
+Arrays are a header chunk followed by a data chunk.
+Header chunks have the following structure:
+
+- dtype (int64)
+- 8 byte array name
+- ndim (int64)
+- dim[0]
+- dim[1]
+- ...
+"""
+
+import struct
+import sys
+
+import numpy as np
+
+
+def bytelen(a):
+ """Determine the length of a in bytes."""
+ if hasattr(a, "nbytes"):
+ return a.nbytes
+ elif isinstance(a, (bytearray, bytes)):
+ return len(a)
+ else:
+ raise ValueError(a, "cannot determine nbytes")
+
+
+def bytedata(a):
+ """Return a the raw data corresponding to a."""
+ if isinstance(a, (bytearray, bytes, memoryview)):
+ return a
+ elif hasattr(a, "data"):
+ return a.data
+ else:
+ raise ValueError(a, "cannot return bytedata")
+
+
+# tables for converting between long/short NumPy dtypes
+
+long_to_short = """
+float16 f2
+float32 f4
+float64 f8
+int8 i1
+int16 i2
+int32 i4
+int64 i8
+uint8 u1
+uint16 u2
+uint32 u4
+uint64 u8
+""".strip()
+long_to_short = [x.split() for x in long_to_short.split("\n")]
+long_to_short = {x[0]: x[1] for x in long_to_short}
+short_to_long = {v: k for k, v in long_to_short.items()}
+
+
+def check_acceptable_input_type(data, allow64):
+ """Check that the data has an acceptable type for tensor encoding.
+
+ :param data: array
+ :param allow64: allow 64 bit types
+ """
+ for a in data:
+ if a.dtype.name not in long_to_short:
+ raise ValueError("unsupported dataypte")
+ if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]:
+ raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
+
+
+def str64(s):
+ """Convert a string to an int64."""
+ s = s + "\0" * (8 - len(s))
+ s = s.encode("ascii")
+ return struct.unpack("@q", s)[0]
+
+
+def unstr64(i):
+ """Convert an int64 to a string."""
+ b = struct.pack("@q", i)
+ return b.decode("ascii").strip("\0")
+
+
+def check_infos(data, infos, required_infos=None):
+ """Verify the info strings."""
+ if required_infos is False or required_infos is None:
+ return data
+ if required_infos is True:
+ return data, infos
+ if not isinstance(required_infos, (tuple, list)):
+ raise ValueError("required_infos must be tuple or list")
+ for required, actual in zip(required_infos, infos):
+ if required != actual:
+ raise ValueError(f"actual info {actual} doesn't match required info {required}")
+ return data
+
+
+def encode_header(a, info=""):
+ """Encode an array header as a byte array."""
+ if a.ndim >= 10:
+ raise ValueError("too many dimensions")
+ if a.nbytes != np.prod(a.shape) * a.itemsize:
+ raise ValueError("mismatch between size and shape")
+ if a.dtype.name not in long_to_short:
+ raise ValueError("unsupported array type")
+ header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
+ return bytedata(np.array(header, dtype="i8"))
+
+
+def decode_header(h):
+ """Decode a byte array into an array header."""
+ h = np.frombuffer(h, dtype="i8")
+ if unstr64(h[0]) not in short_to_long:
+ raise ValueError("unsupported array type")
+ dtype = np.dtype(short_to_long[unstr64(h[0])])
+ info = unstr64(h[1])
+ rank = int(h[2])
+ shape = tuple(h[3 : 3 + rank])
+ return shape, dtype, info
+
+
+def encode_list(l, infos=None): # noqa: E741
+ """Given a list of arrays, encode them into a list of byte arrays."""
+ if infos is None:
+ infos = [""]
+ else:
+ if len(l) != len(infos):
+ raise ValueError(f"length of list {l} must muatch length of infos {infos}")
+ result = []
+ for i, a in enumerate(l):
+ header = encode_header(a, infos[i % len(infos)])
+ result += [header, bytedata(a)]
+ return result
+
+
+def decode_list(l, infos=False): # noqa: E741
+ """Given a list of byte arrays, decode them into arrays."""
+ result = []
+ infos0 = []
+ for header, data in zip(l[::2], l[1::2]):
+ shape, dtype, info = decode_header(header)
+ a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
+ result += [a]
+ infos0 += [info]
+ return check_infos(result, infos0, infos)
+
+
+magic_str = "~TenBin~"
+magic = str64(magic_str)
+magic_bytes = unstr64(magic).encode("ascii")
+
+
+def roundup(n, k=64):
+ """Round up to the next multiple of 64."""
+ return k * ((n + k - 1) // k)
+
+
+def encode_chunks(l): # noqa: E741
+ """Encode a list of chunks into a single byte array, with lengths and magics.."""
+ size = sum(16 + roundup(b.nbytes) for b in l)
+ result = bytearray(size)
+ offset = 0
+ for b in l:
+ result[offset : offset + 8] = magic_bytes
+ offset += 8
+ result[offset : offset + 8] = struct.pack("@q", b.nbytes)
+ offset += 8
+ result[offset : offset + bytelen(b)] = b
+ offset += roundup(bytelen(b))
+ return result
+
+
+def decode_chunks(buf):
+ """Decode a byte array into a list of chunks."""
+ result = []
+ offset = 0
+ total = bytelen(buf)
+ while offset < total:
+ if magic_bytes != buf[offset : offset + 8]:
+ raise ValueError("magic bytes mismatch")
+ offset += 8
+ nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
+ offset += 8
+ b = buf[offset : offset + nbytes]
+ offset += roundup(nbytes)
+ result.append(b)
+ return result
+
+
+def encode_buffer(l, infos=None): # noqa: E741
+ """Encode a list of arrays into a single byte array."""
+ if not isinstance(l, list):
+ raise ValueError("requires list")
+ return encode_chunks(encode_list(l, infos=infos))
+
+
+def decode_buffer(buf, infos=False):
+ """Decode a byte array into a list of arrays."""
+ return decode_list(decode_chunks(buf), infos=infos)
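+# Round-trip sketch (not part of the original file), assuming NumPy arrays with supported dtypes:
+# ```
+# import numpy as np
+#
+# arrays = [np.arange(6, dtype="int32").reshape(2, 3), np.ones(4, dtype="float32")]
+# assert all(np.array_equal(a, b) for a, b in zip(arrays, decode_buffer(encode_buffer(arrays))))
+# ```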
+
+
+def write_chunk(stream, buf):
+ """Write a byte chunk to the stream with magics, length, and padding."""
+ nbytes = bytelen(buf)
+ stream.write(magic_bytes)
+ stream.write(struct.pack("@q", nbytes))
+ stream.write(bytedata(buf))
+ padding = roundup(nbytes) - nbytes
+ if padding > 0:
+ stream.write(b"\0" * padding)
+
+
+def read_chunk(stream):
+ """Read a byte chunk from a stream with magics, length, and padding."""
+ magic = stream.read(8)
+ if magic == b"":
+ return None
+ if magic != magic_bytes:
+ raise ValueError("magic number does not match")
+ nbytes = stream.read(8)
+ nbytes = struct.unpack("@q", nbytes)[0]
+ if nbytes < 0:
+ raise ValueError("negative nbytes")
+ data = stream.read(nbytes)
+ padding = roundup(nbytes) - nbytes
+ if padding > 0:
+ stream.read(padding)
+ return data
+
+
+def write(stream, l, infos=None): # noqa: E741
+ """Write a list of arrays to a stream, with magics, length, and padding."""
+ for chunk in encode_list(l, infos=infos):
+ write_chunk(stream, chunk)
+
+
+def read(stream, n=sys.maxsize, infos=False):
+ """Read a list of arrays from a stream, with magics, length, and padding."""
+ chunks = []
+ for _ in range(n):
+ header = read_chunk(stream)
+ if header is None:
+ break
+ data = read_chunk(stream)
+ if data is None:
+ raise ValueError("premature EOF")
+ chunks += [header, data]
+ return decode_list(chunks, infos=infos)
+
+
+def save(fname, *args, infos=None, nocheck=False):
+ """Save a list of arrays to a file, with magics, length, and padding."""
+ if not nocheck and not fname.endswith(".ten"):
+ raise ValueError("file name should end in .ten")
+ with open(fname, "wb") as stream:
+ write(stream, args, infos=infos)
+
+
+def load(fname, infos=False, nocheck=False):
+ """Read a list of arrays from a file, with magics, length, and padding."""
+ if not nocheck and not fname.endswith(".ten"):
+ raise ValueError("file name should end in .ten")
+ with open(fname, "rb") as stream:
+ return read(stream, infos=infos)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1e86fc417863ba9b5fd8fca97581c63d48768
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py
@@ -0,0 +1,299 @@
+import io
+import json
+from itertools import islice
+from typing import Any, Callable, Dict, List
+
+import numpy as np
+import pyarrow as pa
+
+import datasets
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class WebDataset(datasets.GeneratorBasedBuilder):
+ DEFAULT_WRITER_BATCH_SIZE = 100
+ IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
+ AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
+ DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
+ NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
+
+ @classmethod
+ def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
+ current_example = {}
+ for filename, f in tar_iterator:
+ if "." in filename:
+ example_key, field_name = filename.split(".", 1)
+ if current_example and current_example["__key__"] != example_key:
+ yield current_example
+ current_example = {}
+ current_example["__key__"] = example_key
+ current_example["__url__"] = tar_path
+ current_example[field_name.lower()] = f.read()
+ if field_name in cls.DECODERS:
+ current_example[field_name] = cls.DECODERS[field_name](current_example[field_name])
+ if current_example:
+ yield current_example
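+ # Illustrative grouping (not part of the original file): for a hypothetical archive containing
+ # "0001.jpg", "0001.json", "0002.jpg", "0002.json", members sharing the key prefix "0001" are
+ # merged into one example, e.g. {"__key__": "0001", "__url__": tar_path, "jpg": <bytes>, "json": <decoded dict>},
+ # with per-extension decoding applied via cls.DECODERS (here "json" is parsed, "jpg" stays raw bytes).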
+
+ def _info(self) -> datasets.DatasetInfo:
+ return datasets.DatasetInfo()
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ # Download the data files
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ data_files = dl_manager.download(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ tar_paths = data_files
+ if isinstance(tar_paths, str):
+ tar_paths = [tar_paths]
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+ splits = [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+ )
+ ]
+ else:
+ splits = []
+ for split_name, tar_paths in data_files.items():
+ if isinstance(tar_paths, str):
+ tar_paths = [tar_paths]
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+ splits.append(
+ datasets.SplitGenerator(
+ name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+ )
+ )
+ if not self.info.features:
+ # Get one example to get the feature types
+ pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
+ first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
+ if any(example.keys() != first_examples[0].keys() for example in first_examples):
+ raise ValueError(
+ "The TAR archives of the dataset should be in WebDataset format, "
+ "but the files in the archive don't share the same prefix or the same types."
+ )
+ pa_tables = [pa.Table.from_pylist([example]) for example in first_examples]
+ if datasets.config.PYARROW_VERSION.major < 14:
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema
+ else:
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
+ features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
+
+ # Set Image types
+ for field_name in first_examples[0]:
+ extension = field_name.rsplit(".", 1)[-1]
+ if extension in self.IMAGE_EXTENSIONS:
+ features[field_name] = datasets.Image()
+ # Set Audio types
+ for field_name in first_examples[0]:
+ extension = field_name.rsplit(".", 1)[-1]
+ if extension in self.AUDIO_EXTENSIONS:
+ features[field_name] = datasets.Audio()
+ self.info.features = features
+
+ return splits
+
+ def _generate_examples(self, tar_paths, tar_iterators):
+ image_field_names = [
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
+ ]
+ audio_field_names = [
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
+ ]
+ for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
+ for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
+ for field_name in image_field_names + audio_field_names:
+ example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]}
+ yield f"{tar_idx}_{example_idx}", example
+
+
+# Obtained with:
+# ```
+# import PIL.Image
+# IMAGE_EXTENSIONS = []
+# PIL.Image.init()
+# for ext, format in PIL.Image.EXTENSION.items():
+# if format in PIL.Image.OPEN:
+# IMAGE_EXTENSIONS.append(ext[1:])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+IMAGE_EXTENSIONS = [
+ "blp",
+ "bmp",
+ "dib",
+ "bufr",
+ "cur",
+ "pcx",
+ "dcx",
+ "dds",
+ "ps",
+ "eps",
+ "fit",
+ "fits",
+ "fli",
+ "flc",
+ "ftc",
+ "ftu",
+ "gbr",
+ "gif",
+ "grib",
+ "h5",
+ "hdf",
+ "png",
+ "apng",
+ "jp2",
+ "j2k",
+ "jpc",
+ "jpf",
+ "jpx",
+ "j2c",
+ "icns",
+ "ico",
+ "im",
+ "iim",
+ "tif",
+ "tiff",
+ "jfif",
+ "jpe",
+ "jpg",
+ "jpeg",
+ "mpg",
+ "mpeg",
+ "msp",
+ "pcd",
+ "pxr",
+ "pbm",
+ "pgm",
+ "ppm",
+ "pnm",
+ "psd",
+ "bw",
+ "rgb",
+ "rgba",
+ "sgi",
+ "ras",
+ "tga",
+ "icb",
+ "vda",
+ "vst",
+ "webp",
+ "wmf",
+ "emf",
+ "xbm",
+ "xpm",
+]
+WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+ "aiff",
+ "au",
+ "avr",
+ "caf",
+ "flac",
+ "htk",
+ "svx",
+ "mat4",
+ "mat5",
+ "mpc2k",
+ "ogg",
+ "paf",
+ "pvf",
+ "raw",
+ "rf64",
+ "sd2",
+ "sds",
+ "ircam",
+ "voc",
+ "w64",
+ "wav",
+ "nist",
+ "wavex",
+ "wve",
+ "xi",
+ "mp3",
+ "opus",
+]
+WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS
+
+
+def text_loads(data: bytes):
+ return data.decode("utf-8")
+
+
+def tenbin_loads(data: bytes):
+ from . import _tenbin
+
+ return _tenbin.decode_buffer(data)
+
+
+def msgpack_loads(data: bytes):
+ import msgpack
+
+ return msgpack.unpackb(data)
+
+
+def npy_loads(data: bytes):
+ import numpy.lib.format
+
+ stream = io.BytesIO(data)
+ return numpy.lib.format.read_array(stream, allow_pickle=False)
+
+
+def npz_loads(data: bytes):
+ return np.load(io.BytesIO(data), allow_pickle=False)
+
+
+def cbor_loads(data: bytes):
+ import cbor
+
+ return cbor.loads(data)
+
+
+# Obtained by checking `decoders` in `webdataset.autodecode`
+# and removing unsafe extension decoders.
+# Removed Pickle decoders:
+# - "pyd": lambda data: pickle.loads(data)
+# - "pickle": lambda data: pickle.loads(data)
+# Removed Torch decoders:
+# - "pth": lambda data: torch_loads(data)
+# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False):
+# - "npy": npy_loads,
+# - "npz": lambda data: np.load(io.BytesIO(data)),
+DECODERS = {
+ "txt": text_loads,
+ "text": text_loads,
+ "transcript": text_loads,
+ "cls": int,
+ "cls2": int,
+ "index": int,
+ "inx": int,
+ "id": int,
+ "json": json.loads,
+ "jsn": json.loads,
+ "ten": tenbin_loads,
+ "tb": tenbin_loads,
+ "mp": msgpack_loads,
+ "msg": msgpack_loads,
+ "npy": npy_loads,
+ "npz": npz_loads,
+ "cbor": cbor_loads,
+}
+WebDataset.DECODERS = DECODERS
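+# Usage sketch (not part of the original file): this builder backs `load_dataset("webdataset", ...)`.
+# The tar shard path below is hypothetical.
+# ```
+# import datasets
+#
+# ds = datasets.load_dataset("webdataset", data_files={"train": "path/to/shard-000000.tar"}, split="train")
+# ```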
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df74ff8cac8f1fd30a5dd786c9cd5c89d2880af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ruff: noqa
+
+from . import tqdm as _tqdm # _tqdm is the module
+from .info_utils import VerificationMode
+from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
+from .version import Version
+from .experimental import experimental
+from .tqdm import (
+ disable_progress_bars,
+ enable_progress_bars,
+ are_progress_bars_disabled,
+ tqdm,
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45efc655ebc79ef896baddae79d77272ecf61d08
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_dill.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a64b83b32c81cc327c13277490e31a7e48e0b4a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/experimental.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd718279f57304a85a928a468afb26f899781401
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/file_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..749cad066a0aabc231cb8e40938bc004fefd7617
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79c9be4bb320563e5829fab6b6012590085c1ebd
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0823e511b3afaaf12def6ced8760de6b471cd1b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/patching.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b082c6d3508d5bc7c9b8b0b41bafd676d769dfdf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a459658fba4e1f70b1f18fc0f4dbe0cf152c778f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48098c10d2cfd29b5d2710cd9203fcdc30a7f1bc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ebd6218dd500b18ffa2b9d4634459125d70400f
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py
new file mode 100644
index 0000000000000000000000000000000000000000..58edecb1cc14cec752afc34c1551c49c7490ca4c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py
@@ -0,0 +1,96 @@
+from typing import Any, Dict, List, Optional, Union
+
+from .. import config
+from ..exceptions import DatasetsError
+from .file_utils import (
+ get_authentication_headers_for_url,
+ http_get,
+)
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class DatasetViewerError(DatasetsError):
+ """Dataset viewer error.
+
+ Raised when trying to use the dataset viewer HTTP API and:
+ - the dataset is missing, or
+ - the dataset is private/gated and the user is not authenticated, or
+ - the /parquet or /info responses are unavailable.
+ """
+
+
+def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
+ """
+ Get the dataset exported parquet files
+ Docs: https://huggingface.co/docs/datasets-server/parquet
+ """
+ dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
+ try:
+ parquet_data_files_response = http_get(
+ url=dataset_viewer_parquet_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ parquet_data_files_response.raise_for_status()
+ if "X-Revision" in parquet_data_files_response.headers:
+ if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
+ parquet_data_files_response_json = parquet_data_files_response.json()
+ if (
+ parquet_data_files_response_json.get("partial") is False
+ and not parquet_data_files_response_json.get("pending", True)
+ and not parquet_data_files_response_json.get("failed", True)
+ and "parquet_files" in parquet_data_files_response_json
+ ):
+ return parquet_data_files_response_json["parquet_files"]
+ else:
+ logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
+ )
+ except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
+ logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetViewerError("No exported Parquet files available.")
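+# Illustrative call (not part of the original file; the dataset name is hypothetical):
+# ```
+# parquet_files = get_exported_parquet_files("username/my_dataset", revision="main", token=None)
+# # each entry describes one exported Parquet file (config, split, URL, size, ...)
+# ```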
+
+
+def get_exported_dataset_infos(
+ dataset: str, revision: str, token: Optional[Union[str, bool]]
+) -> Dict[str, Dict[str, Any]]:
+ """
+ Get the dataset information, can be useful to get e.g. the dataset features.
+ Docs: https://huggingface.co/docs/datasets-server/info
+ """
+ dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
+ try:
+ info_response = http_get(
+ url=dataset_viewer_info_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ info_response.raise_for_status()
+ if "X-Revision" in info_response.headers:
+ if info_response.headers["X-Revision"] == revision or revision is None:
+ info_response = info_response.json()
+ if (
+ info_response.get("partial") is False
+ and not info_response.get("pending", True)
+ and not info_response.get("failed", True)
+ and "dataset_info" in info_response
+ ):
+ return info_response["dataset_info"]
+ else:
+ logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
+ )
+ except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
+ logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetViewerError("No exported dataset infos available.")
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dill.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dill.py
new file mode 100644
index 0000000000000000000000000000000000000000..15578198a39622340f937a3dfdd9091af26d5453
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_dill.py
@@ -0,0 +1,459 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Extends `dill` to support pickling more types and produce more consistent dumps."""
+
+import os
+import sys
+from io import BytesIO
+from types import CodeType, FunctionType
+
+import dill
+from packaging import version
+
+from .. import config
+
+
+class Pickler(dill.Pickler):
+ dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
+ _legacy_no_dict_keys_sorting = False
+
+ def save(self, obj, save_persistent_id=True):
+ obj_type = type(obj)
+ if obj_type not in self.dispatch:
+ if "regex" in sys.modules:
+ import regex # type: ignore
+
+ if obj_type is regex.Pattern:
+ pklregister(obj_type)(_save_regexPattern)
+ if "spacy" in sys.modules:
+ import spacy # type: ignore
+
+ if issubclass(obj_type, spacy.Language):
+ pklregister(obj_type)(_save_spacyLanguage)
+ if "tiktoken" in sys.modules:
+ import tiktoken # type: ignore
+
+ if obj_type is tiktoken.Encoding:
+ pklregister(obj_type)(_save_tiktokenEncoding)
+ if "torch" in sys.modules:
+ import torch # type: ignore
+
+ if issubclass(obj_type, torch.Tensor):
+ pklregister(obj_type)(_save_torchTensor)
+
+ if obj_type is torch.Generator:
+ pklregister(obj_type)(_save_torchGenerator)
+
+ # Unwrap `torch.compile`-ed modules
+ if issubclass(obj_type, torch.nn.Module):
+ obj = getattr(obj, "_orig_mod", obj)
+ if "transformers" in sys.modules:
+ import transformers # type: ignore
+
+ if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
+ pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
+
+ # Unwrap `torch.compile`-ed functions
+ if obj_type is FunctionType:
+ obj = getattr(obj, "_torchdynamo_orig_callable", obj)
+ dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
+
+ def _batch_setitems(self, items):
+ if self._legacy_no_dict_keys_sorting:
+ return super()._batch_setitems(items)
+ # Ignore the order of keys in a dict
+ try:
+ # Faster, but fails for unorderable elements
+ items = sorted(items)
+ except Exception: # TypeError, decimal.InvalidOperation, etc.
+ from datasets.fingerprint import Hasher
+
+ items = sorted(items, key=lambda x: Hasher.hash(x[0]))
+ dill.Pickler._batch_setitems(self, items)
+
+ def memoize(self, obj):
+ # Don't memoize strings since two identical strings can have different Python ids
+ if type(obj) is not str: # noqa: E721
+ dill.Pickler.memoize(self, obj)
+
+
+def pklregister(t):
+ """Register a custom reducer for the type."""
+
+ def proxy(func):
+ Pickler.dispatch[t] = func
+ return func
+
+ return proxy
+
+
+def dump(obj, file):
+ """Pickle an object to a file."""
+ Pickler(file, recurse=True).dump(obj)
+
+
+def dumps(obj):
+ """Pickle an object to a string."""
+ file = BytesIO()
+ dump(obj, file)
+ return file.getvalue()
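+# Illustrative property (not part of the original file): because dict items and sets are sorted
+# before pickling, dumps() is insensitive to insertion order, which keeps hashes/fingerprints stable:
+# ```
+# assert dumps({"b": 1, "a": 2}) == dumps({"a": 2, "b": 1})
+# ```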
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+ def log(pickler, msg):
+ dill._dill.log.info(msg)
+
+elif config.DILL_VERSION.release[:3] in [
+ version.parse("0.3.6").release,
+ version.parse("0.3.7").release,
+ version.parse("0.3.8").release,
+]:
+
+ def log(pickler, msg):
+ dill._dill.logger.trace(pickler, msg)
+
+
+@pklregister(set)
+def _save_set(pickler, obj):
+ log(pickler, f"Se: {obj}")
+ try:
+ # Faster, but fails for unorderable elements
+ args = (sorted(obj),)
+ except Exception: # TypeError, decimal.InvalidOperation, etc.
+ from datasets.fingerprint import Hasher
+
+ args = (sorted(obj, key=Hasher.hash),)
+
+ pickler.save_reduce(set, args, obj=obj)
+ log(pickler, "# Se")
+
+
+def _save_regexPattern(pickler, obj):
+ import regex # type: ignore
+
+ log(pickler, f"Re: {obj}")
+ args = (obj.pattern, obj.flags)
+ pickler.save_reduce(regex.compile, args, obj=obj)
+ log(pickler, "# Re")
+
+
+def _save_tiktokenEncoding(pickler, obj):
+ import tiktoken # type: ignore
+
+ log(pickler, f"Enc: {obj}")
+ args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
+ pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
+ log(pickler, "# Enc")
+
+
+def _save_torchTensor(pickler, obj):
+ import torch # type: ignore
+
+ # `torch.from_numpy` is not picklable in `torch>=1.11.0`
+ def create_torchTensor(np_array):
+ return torch.from_numpy(np_array)
+
+ log(pickler, f"To: {obj}")
+ args = (obj.detach().cpu().numpy(),)
+ pickler.save_reduce(create_torchTensor, args, obj=obj)
+ log(pickler, "# To")
+
+
+def _save_torchGenerator(pickler, obj):
+ import torch # type: ignore
+
+ def create_torchGenerator(state):
+ generator = torch.Generator()
+ generator.set_state(state)
+ return generator
+
+ log(pickler, f"Ge: {obj}")
+ args = (obj.get_state(),)
+ pickler.save_reduce(create_torchGenerator, args, obj=obj)
+ log(pickler, "# Ge")
+
+
+def _save_spacyLanguage(pickler, obj):
+ import spacy # type: ignore
+
+ def create_spacyLanguage(config, bytes):
+ lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
+ lang_inst = lang_cls.from_config(config)
+ return lang_inst.from_bytes(bytes)
+
+ log(pickler, f"Sp: {obj}")
+ args = (obj.config, obj.to_bytes())
+ pickler.save_reduce(create_spacyLanguage, args, obj=obj)
+ log(pickler, "# Sp")
+
+
+def _save_transformersPreTrainedTokenizerBase(pickler, obj):
+ log(pickler, f"Tok: {obj}")
+ # Ignore the `cache` attribute
+ state = obj.__dict__
+ if "cache" in state and isinstance(state["cache"], dict):
+ state["cache"] = {}
+ pickler.save_reduce(type(obj), (), state=state, obj=obj)
+ log(pickler, "# Tok")
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+ @pklregister(CodeType)
+ def _save_code(pickler, obj):
+ """
+ From dill._dill.save_code
+ This is a modified version that removes the origin (filename + line no.)
+ of functions created in notebooks or shells for example.
+ """
+ dill._dill.log.info(f"Co: {obj}")
+ # The filename of a function is the .py file where it is defined.
+ # Filenames of functions created in notebooks or shells start with '<'
+ # ex: <ipython-input-...> for ipython, and <stdin> for shell
+ # Filenames of functions created in ipykernel look like
+ # f"{tempdir}/ipykernel_{id1}/{id2}.py"
+ # Moreover lambda functions have a special name: '<lambda>'
+ # ex: (lambda x: x).__code__.co_name == "<lambda>" # True
+ #
+ # For the hashing mechanism we ignore where the function has been defined
+ # More specifically:
+ # - we ignore the filename of special functions (filename starts with '<')
+ # - we always ignore the line number
+ # - we only use the base name of the file instead of the whole path,
+ # to be robust in case a script is moved for example.
+ #
+ # Only those two lines are different from the original implementation:
+ co_filename = (
+ ""
+ if obj.co_filename.startswith("<")
+ or (
+ len(obj.co_filename.split(os.path.sep)) > 1
+ and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+ )
+ or obj.co_name == "<lambda>"
+ else os.path.basename(obj.co_filename)
+ )
+ co_firstlineno = 1
+ # The rest is the same as in the original dill implementation
+ if dill._dill.PY3:
+ if hasattr(obj, "co_posonlyargcount"):
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else:
+ args = (
+ obj.co_argcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else:
+ args = (
+ obj.co_argcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ pickler.save_reduce(CodeType, args, obj=obj)
+ dill._dill.log.info("# Co")
+ return
+
+elif config.DILL_VERSION.release[:3] in [
+ version.parse("0.3.6").release,
+ version.parse("0.3.7").release,
+ version.parse("0.3.8").release,
+]:
+ # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
+ @pklregister(CodeType)
+ def save_code(pickler, obj):
+ dill._dill.logger.trace(pickler, "Co: %s", obj)
+
+ ############################################################################################################
+ # Modification here for huggingface/datasets
+ # The filename of a function is the .py file where it is defined.
+ # Filenames of functions created in notebooks or shells start with '<'
+ # ex: <ipython-input-...> for ipython, and <stdin> for shell
+ # Filenames of functions created in ipykernel look like
+ # f"{tempdir}/ipykernel_{id1}/{id2}.py"
+ # Moreover lambda functions have a special name: '<lambda>'
+ # ex: (lambda x: x).__code__.co_name == "<lambda>" # True
+ #
+ # For the hashing mechanism we ignore where the function has been defined
+ # More specifically:
+ # - we ignore the filename of special functions (filename starts with '<')
+ # - we always ignore the line number
+ # - we only use the base name of the file instead of the whole path,
+ # to be robust in case a script is moved for example.
+ #
+ # Only those two lines are different from the original implementation:
+ co_filename = (
+ ""
+ if obj.co_filename.startswith("<")
+ or (
+ len(obj.co_filename.split(os.path.sep)) > 1
+ and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+ )
+ or obj.co_name == "<lambda>"
+ else os.path.basename(obj.co_filename)
+ )
+ co_firstlineno = 1
+ # The rest is the same as in the original dill implementation, except for the replacements:
+ # - obj.co_filename => co_filename
+ # - obj.co_firstlineno => co_firstlineno
+ ############################################################################################################
+
+ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ obj.co_qualname,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_endlinetable,
+ obj.co_columntable,
+ obj.co_exceptiontable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ obj.co_qualname,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_exceptiontable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else: # python 3.7 (15 args)
+ args = (
+ obj.co_argcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+
+ pickler.save_reduce(dill._dill._create_code, args, obj=obj)
+ dill._dill.logger.trace(pickler, "# Co")
+ return
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/_filelock.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_filelock.py
new file mode 100644
index 0000000000000000000000000000000000000000..19620e6e777505eaf314366f7f3c657fafc515e0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/_filelock.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""Utilities to handle file locking in `datasets`."""
+
+import os
+
+from filelock import FileLock as FileLock_
+from filelock import UnixFileLock
+from filelock import __version__ as _filelock_version
+from packaging import version
+
+
+class FileLock(FileLock_):
+ """
+ A `filelock.FileLock` initializer that handles long paths.
+ It also uses the current umask for lock files.
+ """
+
+ MAX_FILENAME_LENGTH = 255
+
+ def __init__(self, lock_file, *args, **kwargs):
+ # The "mode" argument is required if we want to use the current umask in filelock >= 3.10
+ # In previous versions it was already using the current umask.
+ if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
+ umask = os.umask(0o666)
+ os.umask(umask)
+ kwargs["mode"] = 0o666 & ~umask
+ lock_file = self.hash_filename_if_too_long(lock_file)
+ super().__init__(lock_file, *args, **kwargs)
+
+ @classmethod
+ def hash_filename_if_too_long(cls, path: str) -> str:
+ path = os.path.abspath(os.path.expanduser(path))
+ filename = os.path.basename(path)
+ max_filename_length = cls.MAX_FILENAME_LENGTH
+ if issubclass(cls, UnixFileLock):
+ max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
+ if len(filename) > max_filename_length:
+ dirname = os.path.dirname(path)
+ hashed_filename = str(hash(filename))
+ new_filename = (
+ filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
+ )
+ return os.path.join(dirname, new_filename)
+ else:
+ return path
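+# Illustrative behaviour (not part of the original file; the path below is hypothetical): a lock
+# file whose basename exceeds the filesystem limit gets a truncated, hash-suffixed name instead.
+# ```
+# lock = FileLock("/tmp/" + "a" * 300 + ".lock")  # basename is shortened to "aaa...<hash>.lock"
+# ```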
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/beam_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/beam_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..831354397cf2bb1c0ee464093484d53c037aa95c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/beam_utils.py
@@ -0,0 +1,52 @@
+import os
+
+from apache_beam.io.filesystems import FileSystems
+from apache_beam.pipeline import Pipeline
+
+from .logging import get_logger
+
+
+CHUNK_SIZE = 2 << 20 # 2mb
+logger = get_logger(__name__)
+
+
+class BeamPipeline(Pipeline):
+ """Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
+
+ def is_local(self):
+ runner = self._options.get_all_options().get("runner")
+ return runner in [None, "DirectRunner", "PortableRunner"]
+
+
+def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
+ """Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
+ fs = FileSystems
+ if fs.exists(remote_file_path):
+ if force_upload:
+ logger.info(f"Remote path already exist: {remote_file_path}. Overwriting it as force_upload=True.")
+ else:
+ logger.info(f"Remote path already exist: {remote_file_path}. Skipping it as force_upload=False.")
+ return
+ with fs.create(remote_file_path) as remote_file:
+ with open(local_file_path, "rb") as local_file:
+ chunk = local_file.read(CHUNK_SIZE)
+ while chunk:
+ remote_file.write(chunk)
+ chunk = local_file.read(CHUNK_SIZE)
+
+
+def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
+ """Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
+ fs = FileSystems
+ if os.path.exists(local_file_path):
+ if force_download:
+ logger.info(f"Local path already exist: {remote_file_path}. Overwriting it as force_upload=True.")
+ else:
+ logger.info(f"Local path already exist: {remote_file_path}. Skipping it as force_upload=False.")
+ return
+ with fs.open(remote_file_path) as remote_file:
+ with open(local_file_path, "wb") as local_file:
+ chunk = remote_file.read(CHUNK_SIZE)
+ while chunk:
+ local_file.write(chunk)
+ chunk = remote_file.read(CHUNK_SIZE)
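+# Usage sketch (not part of the original file; the paths and bucket are hypothetical):
+# ```
+# upload_local_to_remote("train.arrow", "gs://my-bucket/train.arrow")
+# download_remote_to_local("gs://my-bucket/train.arrow", "train_copy.arrow")
+# ```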
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/cache.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..5485441439d17051eb9a59bc7bc4155321923958
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/cache.py
@@ -0,0 +1,283 @@
+import json
+import os
+import re
+import shutil
+import tempfile
+from contextlib import contextmanager
+from functools import partial
+from pathlib import Path
+from urllib.parse import urljoin, urlparse
+
+import requests
+
+from datasets import DownloadConfig, config
+from datasets.utils.extract import ExtractManager
+from datasets.utils.file_utils import (
+ _raise_if_offline_mode_is_enabled,
+ ftp_get,
+ ftp_head,
+ get_authentication_headers_for_url,
+ hash_url_to_filename,
+ http_get,
+ http_head,
+ is_local_path,
+ is_remote_url,
+ logger,
+)
+from datasets.utils.filelock import FileLock
+
+
+def cached_path(
+ url_or_filename,
+ download_config=None,
+ **download_kwargs,
+) -> str:
+ """
+ Given something that might be a URL (or might be a local path),
+ determine which. If it's a URL, download the file and cache it, and
+ return the path to the cached file. If it's already a local path,
+ make sure the file exists and then return the path.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ ValueError: if it couldn't parse the url or filename correctly
+ requests.exceptions.ConnectionError: in case of internet connection issue
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+
+ cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+ if isinstance(url_or_filename, Path):
+ url_or_filename = str(url_or_filename)
+
+ if is_remote_url(url_or_filename):
+ # URL, so get it from the cache (downloading if necessary)
+ output_path = get_from_cache(
+ url_or_filename,
+ cache_dir=cache_dir,
+ force_download=download_config.force_download,
+ proxies=download_config.proxies,
+ resume_download=download_config.resume_download,
+ user_agent=download_config.user_agent,
+ local_files_only=download_config.local_files_only,
+ use_etag=download_config.use_etag,
+ max_retries=download_config.max_retries,
+ use_auth_token=download_config.use_auth_token,
+ ignore_url_params=download_config.ignore_url_params,
+ download_desc=download_config.download_desc,
+ )
+ elif os.path.exists(url_or_filename):
+ # File, and it exists.
+ output_path = url_or_filename
+ elif is_local_path(url_or_filename):
+ # File, but it doesn't exist.
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
+ else:
+ # Something unknown
+ raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
+
+ if output_path is None:
+ return output_path
+
+ if download_config.extract_compressed_file:
+ output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
+ output_path, force_extract=download_config.force_extract
+ )
+
+ return output_path
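+
+# Illustrative calls (sketch, not part of the upstream module; URL and paths are made up):
+#
+# local_path = cached_path("https://example.com/data/train.csv", cache_dir="/tmp/hf_downloads")
+# local_path = cached_path("/data/train.csv")  # an existing local path is returned unchanged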
+
+
+def get_from_cache(
+ url,
+ cache_dir=None,
+ force_download=False,
+ proxies=None,
+ etag_timeout=100,
+ resume_download=False,
+ user_agent=None,
+ local_files_only=False,
+ use_etag=True,
+ max_retries=0,
+ use_auth_token=None,
+ ignore_url_params=False,
+ download_desc=None,
+) -> str:
+ """
+ Given a URL, look for the corresponding file in the local cache.
+ If it's not there, download it. Then return the path to the cached file.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ """
+ if cache_dir is None:
+ cache_dir = config.HF_DATASETS_CACHE
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+
+ os.makedirs(cache_dir, exist_ok=True)
+
+ if ignore_url_params:
+ # strip all query parameters and #fragments from the URL
+ cached_url = urljoin(url, urlparse(url).path)
+ else:
+ cached_url = url # additional parameters may be added to the given URL
+
+ connected = False
+ response = None
+ cookies = None
+ etag = None
+ head_error = None
+
+ # Try a first time to find the file on the local file system without eTag (None)
+ # if we don't ask for 'force_download' then we spare a request
+ filename = hash_url_to_filename(cached_url, etag=None)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download and not use_etag:
+ return cache_path
+
+ # Prepare headers for authentication
+ headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
+ if user_agent is not None:
+ headers["user-agent"] = user_agent
+
+ # We don't have the file locally or we need an eTag
+ if not local_files_only:
+ if url.startswith("ftp://"):
+ connected = ftp_head(url)
+ try:
+ response = http_head(
+ url,
+ allow_redirects=True,
+ proxies=proxies,
+ timeout=etag_timeout,
+ max_retries=max_retries,
+ headers=headers,
+ )
+ if response.status_code == 200: # ok
+ etag = response.headers.get("ETag") if use_etag else None
+ for k, v in response.cookies.items():
+ # In some edge cases, we need to get a confirmation token
+ if k.startswith("download_warning") and "drive.google.com" in url:
+ url += "&confirm=" + v
+ cookies = response.cookies
+ connected = True
+ # Fix Google Drive URL to avoid Virus scan warning
+ if "drive.google.com" in url and "confirm=" not in url:
+ url += "&confirm=t"
+ # In some edge cases, head request returns 400 but the connection is actually ok
+ elif (
+ (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
+ or (response.status_code == 405 and "drive.google.com" in url)
+ or (
+ response.status_code == 403
+ and (
+ re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
+ or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
+ )
+ )
+ or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
+ ):
+ connected = True
+ logger.info(f"Couldn't get ETag version for url {url}")
+ elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None:
+ raise ConnectionError(
+ f"Unauthorized for URL {url}. Please use the parameter `use_auth_token=True` after logging in with `huggingface-cli login`"
+ )
+ except (OSError, requests.exceptions.Timeout) as e:
+ # not connected
+ head_error = e
+ pass
+
+ # connected is False: we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible.
+ # try to get the last downloaded one
+ if not connected:
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+ if local_files_only:
+ raise FileNotFoundError(
+ f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
+ " disabled. To enable file online look-ups, set 'local_files_only' to False."
+ )
+ elif response is not None and response.status_code == 404:
+ raise FileNotFoundError(f"Couldn't find file at {url}")
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ if head_error is not None:
+ raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
+ elif response is not None:
+ raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
+ else:
+ raise ConnectionError(f"Couldn't reach {url}")
+
+ # Try a second time
+ filename = hash_url_to_filename(cached_url, etag)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+
+ # From now on, connected is True.
+ # Prevent parallel downloads of the same file with a lock.
+ lock_path = cache_path + ".lock"
+ with FileLock(lock_path):
+ if resume_download:
+ incomplete_path = cache_path + ".incomplete"
+
+ @contextmanager
+ def _resumable_file_manager():
+ with open(incomplete_path, "a+b") as f:
+ yield f
+
+ temp_file_manager = _resumable_file_manager
+ if os.path.exists(incomplete_path):
+ resume_size = os.stat(incomplete_path).st_size
+ else:
+ resume_size = 0
+ else:
+ temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
+ resume_size = 0
+
+ # Download to temporary file, then copy to cache dir once finished.
+ # Otherwise you get corrupt cache entries if the download gets interrupted.
+ with temp_file_manager() as temp_file:
+ logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
+
+ # GET file object
+ if url.startswith("ftp://"):
+ ftp_get(url, temp_file)
+ else:
+ http_get(
+ url,
+ temp_file,
+ proxies=proxies,
+ resume_size=resume_size,
+ headers=headers,
+ cookies=cookies,
+ max_retries=max_retries,
+ desc=download_desc,
+ )
+
+ logger.info(f"storing {url} in cache at {cache_path}")
+ shutil.move(temp_file.name, cache_path)
+
+ logger.info(f"creating metadata file for {cache_path}")
+ meta = {"url": url, "etag": etag}
+ meta_path = cache_path + ".json"
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ return cache_path
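+
+
+# Descriptive note on the cache layout produced above (per downloaded URL):
+#   <cache_dir>/<sha256(url)>[.<sha256(etag)>]   the downloaded file itself
+#   <cache_dir>/<name>.json                      metadata: {"url": ..., "etag": ...}
+#   <cache_dir>/<name>.lock                      FileLock guarding against parallel downloads
+#   <cache_dir>/<name>.incomplete                partial download reused when resume_download=True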
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f05ecbeaa3eae5476e99c461dbede9ebfa111eb0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py
@@ -0,0 +1,105 @@
+import enum
+import inspect
+import warnings
+from functools import wraps
+from typing import Callable, Optional
+
+from .logging import get_logger
+
+
+_emitted_deprecation_warnings = set()
+logger = get_logger(__name__)
+
+
+def deprecated(help_message: Optional[str] = None):
+ """Decorator to mark a class or a function as deprecated.
+
+ Args:
+ help_message (:obj:`str`, optional): An optional message to guide the user on how to
+ switch to non-deprecated usage of the library.
+ """
+
+ def decorator(deprecated_class_or_function: Callable):
+ global _emitted_deprecation_warnings
+
+ if inspect.isclass(deprecated_class_or_function):
+ deprecated_function = deprecated_class_or_function.__init__
+ name = deprecated_class_or_function.__name__
+ else:
+ deprecated_function = deprecated_class_or_function
+ name = deprecated_function.__name__
+ # Support deprecating __init__ class method: class name instead
+ name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
+
+ warning_msg = f"{name} is deprecated and will be removed in the next major version of datasets." + (
+ f" {help_message}" if help_message else ""
+ )
+
+ @wraps(deprecated_function)
+ def wrapper(*args, **kwargs):
+ func_hash = hash(deprecated_function)
+ if func_hash not in _emitted_deprecation_warnings:
+ warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
+ _emitted_deprecation_warnings.add(func_hash)
+ return deprecated_function(*args, **kwargs)
+
+ wrapper._decorator_name_ = "deprecated"
+
+ if inspect.isclass(deprecated_class_or_function):
+ deprecated_class_or_function.__init__ = wrapper
+ return deprecated_class_or_function
+ else:
+ return wrapper
+
+ return decorator
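+
+# Illustrative usage (sketch; the function name and message are made up):
+#
+# @deprecated("Use `new_function` instead.")
+# def old_function():
+#     ...
+#
+# The first call to old_function() emits a FutureWarning; later calls stay silent because the
+# function hash is remembered in _emitted_deprecation_warnings.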
+
+
+class OnAccess(enum.EnumMeta):
+ """
+ Enum metaclass that calls a user-specified function whenever a member is accessed.
+ """
+
+ def __getattribute__(cls, name):
+ obj = super().__getattribute__(name)
+ if isinstance(obj, enum.Enum) and obj._on_access:
+ obj._on_access()
+ return obj
+
+ def __getitem__(cls, name):
+ member = super().__getitem__(name)
+ if member._on_access:
+ member._on_access()
+ return member
+
+ def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
+ obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
+ if isinstance(obj, enum.Enum) and obj._on_access:
+ obj._on_access()
+ return obj
+
+
+class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
+ """
+ Enum class that calls `deprecate` method whenever a member is accessed.
+ """
+
+ def __new__(cls, value):
+ member = object.__new__(cls)
+ member._value_ = value
+ member._on_access = member.deprecate
+ return member
+
+ @property
+ def help_message(self):
+ return ""
+
+ def deprecate(self):
+ help_message = f" {self.help_message}" if self.help_message else ""
+ warnings.warn(
+ f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ + help_message,
+ FutureWarning,
+ stacklevel=3,
+ )
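+
+
+# Illustrative subclass (sketch; the enum and message are made up). Accessing any member, e.g.
+# MyDeprecatedFormat.CSV, triggers deprecate() through the OnAccess metaclass and emits the warning above.
+#
+# class MyDeprecatedFormat(DeprecatedEnum):
+#     CSV = "csv"
+#
+#     @property
+#     def help_message(self):
+#         return "Use the new format API instead."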
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/doc_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/doc_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ef8bcb4e70725ad086cb817e0ec4551d1c0966e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/doc_utils.py
@@ -0,0 +1,15 @@
+from typing import Callable
+
+
+def is_documented_by(function_with_docstring: Callable):
+ """Decorator to share docstrings across common functions.
+
+ Args:
+ function_with_docstring (`Callable`): The function whose docstring is shared.
+ """
+
+ def wrapper(target_function):
+ target_function.__doc__ = function_with_docstring.__doc__
+ return target_function
+
+ return wrapper
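+
+
+# Illustrative usage (sketch; the function names are made up):
+#
+# def map_batched(fn):
+#     """Apply `fn` to batches of examples."""
+#
+# @is_documented_by(map_batched)
+# def map_batched_alias(fn):
+#     ...  # map_batched_alias.__doc__ is now the docstring of map_batched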
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/download_manager.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b524c2f9686f65d083c424a4e17d001395b743b6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/download_manager.py
@@ -0,0 +1 @@
+# deprecated, please use datasets.download.download_manager
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/experimental.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/experimental.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc406154e9347f4df83b1f7b08c32a961d469f6a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/experimental.py
@@ -0,0 +1,43 @@
+"""Contains utilities to flag a feature as "experimental" in datasets."""
+
+import warnings
+from functools import wraps
+from typing import Callable
+
+
+def experimental(fn: Callable) -> Callable:
+ """Decorator to flag a feature as experimental.
+
+ An experimental feature triggers a warning when used, as it might be subject to breaking changes in the future.
+
+ Args:
+ fn (`Callable`):
+ The function to flag as experimental.
+
+ Returns:
+ `Callable`: The decorated function.
+
+ Example:
+
+ ```python
+ >>> from datasets.utils import experimental
+
+ >>> @experimental
+ ... def my_function():
+ ... print("Hello world!")
+
+ >>> my_function()
+ UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future.
+ Hello world!
+ ```
+ """
+
+ @wraps(fn)
+ def _inner_fn(*args, **kwargs):
+ warnings.warn(
+ (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
+ UserWarning,
+ )
+ return fn(*args, **kwargs)
+
+ return _inner_fn
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/file_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/file_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..acf0cda547db410c0e91a951760133a210753eb2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/file_utils.py
@@ -0,0 +1,1674 @@
+"""
+Utilities for working with the local dataset cache.
+This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
+Copyright by the AllenNLP authors.
+"""
+
+import copy
+import glob
+import io
+import json
+import multiprocessing
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tarfile
+import time
+import urllib
+import warnings
+import xml.dom.minidom
+import zipfile
+from asyncio import TimeoutError
+from contextlib import closing, contextmanager
+from functools import partial
+from io import BytesIO
+from itertools import chain
+from pathlib import Path, PurePosixPath
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, TypeVar, Union
+from unittest.mock import patch
+from urllib.parse import urljoin, urlparse
+from xml.etree import ElementTree as ET
+
+import fsspec
+import huggingface_hub
+import requests
+from aiohttp.client_exceptions import ClientError
+from fsspec.core import strip_protocol, url_to_fs
+from fsspec.utils import can_be_local
+from huggingface_hub.utils import EntryNotFoundError, insecure_hashlib
+from packaging import version
+
+from .. import __version__, config
+from ..download.download_config import DownloadConfig
+from ..filesystems import COMPRESSION_FILESYSTEMS
+from . import _tqdm, logging
+from . import tqdm as hf_tqdm
+from ._filelock import FileLock
+from .extract import ExtractManager
+from .track import TrackedIterable
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+INCOMPLETE_SUFFIX = ".incomplete"
+
+T = TypeVar("T", str, Path)
+
+
+def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
+ """
+ Add hf_modules_cache to the python path.
+ By default hf_modules_cache='~/.cache/huggingface/modules'.
+ It can also be set with the environment variable HF_MODULES_CACHE.
+ This is used to add modules such as `datasets_modules`
+ """
+ hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
+ hf_modules_cache = str(hf_modules_cache)
+ if hf_modules_cache not in sys.path:
+ sys.path.append(hf_modules_cache)
+
+ os.makedirs(hf_modules_cache, exist_ok=True)
+ if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
+ with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
+ pass
+ return hf_modules_cache
+
+
+def is_remote_url(url_or_filename: str) -> bool:
+ return urlparse(url_or_filename).scheme != "" and not os.path.ismount(urlparse(url_or_filename).scheme + ":/")
+
+
+def is_local_path(url_or_filename: str) -> bool:
+ # On unix the scheme of a local path is empty (for both absolute and relative),
+ # while on windows the scheme is the drive name (ex: "c") for absolute paths.
+ # for details on the windows behavior, see https://bugs.python.org/issue42215
+ return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/")
+
+
+def is_relative_path(url_or_filename: str) -> bool:
+ return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename)
+
+
+def relative_to_absolute_path(path: T) -> T:
+ """Convert relative path to absolute path."""
+ abs_path_str = os.path.abspath(os.path.expanduser(os.path.expandvars(str(path))))
+ return Path(abs_path_str) if isinstance(path, Path) else abs_path_str
+
+
+def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
+ if dataset:
+ endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
+ else:
+ endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
+ return "/".join((endpoint, identifier, filename))
+
+
+def head_hf_s3(
+ identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
+) -> Union[requests.Response, Exception]:
+ return http_head(
+ hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
+ max_retries=max_retries,
+ )
+
+
+def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str:
+ default_revision = "main" if version.parse(__version__).is_devrelease else __version__
+ revision = revision or default_revision
+ if dataset:
+ return config.REPO_DATASETS_URL.format(revision=revision, path=path, name=name)
+ else:
+ return config.REPO_METRICS_URL.format(revision=revision, path=path, name=name)
+
+
+def url_or_path_join(base_name: str, *pathnames: str) -> str:
+ if is_remote_url(base_name):
+ return posixpath.join(base_name, *(str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames))
+ else:
+ return Path(base_name, *pathnames).as_posix()
+
+
+def url_or_path_parent(url_or_path: str) -> str:
+ if is_remote_url(url_or_path):
+ return url_or_path[: url_or_path.rindex("/")]
+ else:
+ return os.path.dirname(url_or_path)
+
+
+def hash_url_to_filename(url, etag=None):
+ """
+ Convert `url` into a hashed filename in a repeatable way.
+ If `etag` is specified, append its hash to the url's, delimited
+ by a period.
+ If the url ends with .py, append '.py' to the name so that cached
+ dataset scripts keep their Python extension.
+ """
+ url_bytes = url.encode("utf-8")
+ url_hash = insecure_hashlib.sha256(url_bytes)
+ filename = url_hash.hexdigest()
+
+ if etag:
+ etag_bytes = etag.encode("utf-8")
+ etag_hash = insecure_hashlib.sha256(etag_bytes)
+ filename += "." + etag_hash.hexdigest()
+
+ if url.endswith(".py"):
+ filename += ".py"
+
+ return filename
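+
+# Descriptive note: the result is sha256(url) in hex (64 chars), optionally followed by "." + sha256(etag)
+# in hex, with ".py" re-appended when the url itself ends in ".py".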
+
+
+def cached_path(
+ url_or_filename,
+ download_config=None,
+ **download_kwargs,
+) -> str:
+ """
+ Given something that might be a URL (or might be a local path),
+ determine which. If it's a URL, download the file and cache it, and
+ return the path to the cached file. If it's already a local path,
+ make sure the file exists and then return the path.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ ValueError: if it couldn't parse the url or filename correctly
+ requests.exceptions.ConnectionError: in case of internet connection issue
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+
+ cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+ if isinstance(url_or_filename, Path):
+ url_or_filename = str(url_or_filename)
+
+ # Convert fsspec URL in the format "file://local/path" to "local/path"
+ if can_be_local(url_or_filename):
+ url_or_filename = strip_protocol(url_or_filename)
+
+ if is_remote_url(url_or_filename):
+ # URL, so get it from the cache (downloading if necessary)
+ output_path = get_from_cache(
+ url_or_filename,
+ cache_dir=cache_dir,
+ force_download=download_config.force_download,
+ proxies=download_config.proxies,
+ resume_download=download_config.resume_download,
+ user_agent=download_config.user_agent,
+ local_files_only=download_config.local_files_only,
+ use_etag=download_config.use_etag,
+ max_retries=download_config.max_retries,
+ token=download_config.token,
+ ignore_url_params=download_config.ignore_url_params,
+ storage_options=download_config.storage_options,
+ download_desc=download_config.download_desc,
+ disable_tqdm=download_config.disable_tqdm,
+ )
+ elif os.path.exists(url_or_filename):
+ # File, and it exists.
+ output_path = url_or_filename
+ elif is_local_path(url_or_filename):
+ # File, but it doesn't exist.
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
+ else:
+ # Something unknown
+ raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
+
+ if output_path is None:
+ return output_path
+
+ if download_config.extract_compressed_file:
+ if download_config.extract_on_the_fly:
+ # Add a compression prefix to the compressed file so that it can be extracted
+ # as it's being read using xopen.
+ protocol = _get_extraction_protocol(output_path, download_config=download_config)
+ extension = _get_path_extension(url_or_filename.split("::")[0])
+ if (
+ protocol
+ and extension not in ["tgz", "tar"]
+ and not url_or_filename.split("::")[0].endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
+ ):
+ output_path = relative_to_absolute_path(output_path)
+ if protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
+ # there is one single file which is the uncompressed file
+ inner_file = os.path.basename(output_path)
+ inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
+ output_path = f"{protocol}://{inner_file}::{output_path}"
+ else:
+ output_path = f"{protocol}://::{output_path}"
+ return output_path
+
+ # Eager extraction
+ output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
+ output_path, force_extract=download_config.force_extract
+ )
+ return relative_to_absolute_path(output_path)
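+
+# Descriptive note on the extract_on_the_fly branch above: single-file compressed inputs are returned as
+# chained fsspec paths of the form f"{protocol}://{inner_file}::{output_path}", e.g. a local
+# "train.json.gz" comes back as "gzip://train.json::/abs/path/train.json.gz" and can be read lazily via xopen.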
+
+
+def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
+ ua = f"datasets/{__version__}"
+ ua += f"; python/{config.PY_VERSION}"
+ ua += f"; huggingface_hub/{huggingface_hub.__version__}"
+ ua += f"; pyarrow/{config.PYARROW_VERSION}"
+ if config.TORCH_AVAILABLE:
+ ua += f"; torch/{config.TORCH_VERSION}"
+ if config.TF_AVAILABLE:
+ ua += f"; tensorflow/{config.TF_VERSION}"
+ if config.JAX_AVAILABLE:
+ ua += f"; jax/{config.JAX_VERSION}"
+ if config.BEAM_AVAILABLE:
+ ua += f"; apache_beam/{config.BEAM_VERSION}"
+ if isinstance(user_agent, dict):
+ ua += f"; {'; '.join(f'{k}/{v}' for k, v in user_agent.items())}"
+ elif isinstance(user_agent, str):
+ ua += "; " + user_agent
+ return ua
+
+
+def get_authentication_headers_for_url(
+ url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
+) -> dict:
+ """Handle the HF authentication"""
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if url.startswith(config.HF_ENDPOINT):
+ return huggingface_hub.utils.build_hf_headers(
+ token=token, library_name="datasets", library_version=__version__
+ )
+ else:
+ return {}
+
+
+class OfflineModeIsEnabled(ConnectionError):
+ pass
+
+
+def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
+ """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
+ if config.HF_DATASETS_OFFLINE:
+ raise OfflineModeIsEnabled(
+ "Offline mode is enabled." if msg is None else "Offline mode is enabled. " + str(msg)
+ )
+
+
+def _request_with_retry(
+ method: str,
+ url: str,
+ max_retries: int = 0,
+ base_wait_time: float = 0.5,
+ max_wait_time: float = 2,
+ timeout: float = 10.0,
+ **params,
+) -> requests.Response:
+ """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.
+
+ Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.
+
+ Args:
+ method (str): HTTP method, such as 'GET' or 'HEAD'.
+ url (str): The URL of the resource to fetch.
+ max_retries (int): Maximum number of retries, defaults to 0 (no retries).
+ base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
+ retries then grows exponentially, capped by max_wait_time.
+ max_wait_time (float): Maximum amount of time between two retries, in seconds.
+ **params (additional keyword arguments): Params to pass to :obj:`requests.request`.
+ """
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ tries, success = 0, False
+ while not success:
+ tries += 1
+ try:
+ response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
+ success = True
+ except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
+ if tries > max_retries:
+ raise err
+ else:
+ logger.info(f"{method} request to {url} timed out, retrying... [{tries/max_retries}]")
+ sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1)) # Exponential backoff
+ time.sleep(sleep_time)
+ return response
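+
+# Descriptive note: with the defaults base_wait_time=0.5 and max_wait_time=2, the sleep between retries
+# grows as 0.5s, 1s, 2s, 2s, ... (exponential, capped), and OfflineModeIsEnabled is raised immediately
+# when HF_DATASETS_OFFLINE is set.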
+
+
+def fsspec_head(url, storage_options=None):
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ fs, path = url_to_fs(url, **(storage_options or {}))
+ return fs.info(path)
+
+
+def stack_multiprocessing_download_progress_bars():
+ # Stack downloads progress bars automatically using HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1
+ # We use environment variables since the download may happen in a subprocess
+ return patch.dict(os.environ, {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"})
+
+
+class TqdmCallback(fsspec.callbacks.TqdmCallback):
+ def __init__(self, tqdm_kwargs=None, *args, **kwargs):
+ if config.FSSPEC_VERSION < version.parse("2024.2.0"):
+ super().__init__(tqdm_kwargs, *args, **kwargs)
+ self._tqdm = _tqdm # replace tqdm module by datasets.utils.tqdm module
+ else:
+ kwargs["tqdm_cls"] = _tqdm.tqdm
+ super().__init__(tqdm_kwargs, *args, **kwargs)
+
+
+def fsspec_get(url, temp_file, storage_options=None, desc=None, disable_tqdm=False):
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ fs, path = url_to_fs(url, **(storage_options or {}))
+ callback = TqdmCallback(
+ tqdm_kwargs={
+ "desc": desc or "Downloading",
+ "unit": "B",
+ "unit_scale": True,
+ "position": multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
+ if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
+ and multiprocessing.current_process()._identity
+ else None,
+ "disable": disable_tqdm,
+ }
+ )
+ fs.get_file(path, temp_file.name, callback=callback)
+
+
+def ftp_head(url, timeout=10.0):
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ try:
+ with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
+ r.read(1)
+ except Exception:
+ return False
+ return True
+
+
+def ftp_get(url, temp_file, timeout=10.0):
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ try:
+ logger.info(f"Getting through FTP {url} into {temp_file.name}")
+ with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
+ shutil.copyfileobj(r, temp_file)
+ except urllib.error.URLError as e:
+ raise ConnectionError(e) from None
+
+
+def http_get(
+ url,
+ temp_file,
+ proxies=None,
+ resume_size=0,
+ headers=None,
+ cookies=None,
+ timeout=100.0,
+ max_retries=0,
+ desc=None,
+ disable_tqdm=False,
+) -> Optional[requests.Response]:
+ headers = dict(headers) if headers is not None else {}
+ headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
+ if resume_size > 0:
+ headers["Range"] = f"bytes={resume_size:d}-"
+ response = _request_with_retry(
+ method="GET",
+ url=url,
+ stream=True,
+ proxies=proxies,
+ headers=headers,
+ cookies=cookies,
+ max_retries=max_retries,
+ timeout=timeout,
+ )
+ if temp_file is None:
+ return response
+ if response.status_code == 416: # Range not satisfiable
+ return
+ content_length = response.headers.get("Content-Length")
+ total = resume_size + int(content_length) if content_length is not None else None
+ with hf_tqdm(
+ unit="B",
+ unit_scale=True,
+ total=total,
+ initial=resume_size,
+ desc=desc or "Downloading",
+ position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
+ if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
+ and multiprocessing.current_process()._identity
+ else None,
+ disable=disable_tqdm,
+ ) as progress:
+ for chunk in response.iter_content(chunk_size=1024):
+ progress.update(len(chunk))
+ temp_file.write(chunk)
+
+
+def http_head(
+ url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
+) -> requests.Response:
+ headers = copy.deepcopy(headers) or {}
+ headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
+ response = _request_with_retry(
+ method="HEAD",
+ url=url,
+ proxies=proxies,
+ headers=headers,
+ cookies=cookies,
+ allow_redirects=allow_redirects,
+ timeout=timeout,
+ max_retries=max_retries,
+ )
+ return response
+
+
+def request_etag(
+ url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
+) -> Optional[str]:
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if urlparse(url).scheme not in ("http", "https"):
+ return None
+ headers = get_authentication_headers_for_url(url, token=token)
+ response = http_head(url, headers=headers, max_retries=3)
+ response.raise_for_status()
+ etag = response.headers.get("ETag") if response.ok else None
+ return etag
+
+
+def get_from_cache(
+ url,
+ cache_dir=None,
+ force_download=False,
+ proxies=None,
+ etag_timeout=100,
+ resume_download=False,
+ user_agent=None,
+ local_files_only=False,
+ use_etag=True,
+ max_retries=0,
+ token=None,
+ use_auth_token="deprecated",
+ ignore_url_params=False,
+ storage_options=None,
+ download_desc=None,
+ disable_tqdm=False,
+) -> str:
+ """
+ Given a URL, look for the corresponding file in the local cache.
+ If it's not there, download it. Then return the path to the cached file.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if cache_dir is None:
+ cache_dir = config.HF_DATASETS_CACHE
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+
+ os.makedirs(cache_dir, exist_ok=True)
+
+ if ignore_url_params:
+ # strip all query parameters and #fragments from the URL
+ cached_url = urljoin(url, urlparse(url).path)
+ else:
+ cached_url = url # additional parameters may be added to the given URL
+
+ connected = False
+ response = None
+ cookies = None
+ etag = None
+ head_error = None
+ scheme = None
+
+ # Try a first time to find the file on the local file system without eTag (None)
+ # if we don't ask for 'force_download' then we spare a request
+ filename = hash_url_to_filename(cached_url, etag=None)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download and not use_etag:
+ return cache_path
+
+ # Prepare headers for authentication
+ headers = get_authentication_headers_for_url(url, token=token)
+ if user_agent is not None:
+ headers["user-agent"] = user_agent
+
+ # We don't have the file locally or we need an eTag
+ if not local_files_only:
+ scheme = urlparse(url).scheme
+ if scheme == "ftp":
+ connected = ftp_head(url)
+ elif scheme not in ("http", "https"):
+ response = fsspec_head(url, storage_options=storage_options)
+ # s3fs uses "ETag", gcsfs uses "etag"
+ etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None
+ connected = True
+ try:
+ response = http_head(
+ url,
+ allow_redirects=True,
+ proxies=proxies,
+ timeout=etag_timeout,
+ max_retries=max_retries,
+ headers=headers,
+ )
+ if response.status_code == 200: # ok
+ etag = response.headers.get("ETag") if use_etag else None
+ for k, v in response.cookies.items():
+ # In some edge cases, we need to get a confirmation token
+ if k.startswith("download_warning") and "drive.google.com" in url:
+ url += "&confirm=" + v
+ cookies = response.cookies
+ connected = True
+ # Fix Google Drive URL to avoid Virus scan warning
+ if "drive.google.com" in url and "confirm=" not in url:
+ url += "&confirm=t"
+ # In some edge cases, head request returns 400 but the connection is actually ok
+ elif (
+ (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
+ or (response.status_code == 405 and "drive.google.com" in url)
+ or (
+ response.status_code == 403
+ and (
+ re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
+ or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
+ )
+ )
+ or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
+ ):
+ connected = True
+ logger.info(f"Couldn't get ETag version for url {url}")
+ elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None:
+ raise ConnectionError(
+ f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`"
+ )
+ except (OSError, requests.exceptions.Timeout) as e:
+ # not connected
+ head_error = e
+ pass
+
+ # connected is False: we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible.
+ # try to get the last downloaded one
+ if not connected:
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+ if local_files_only:
+ raise FileNotFoundError(
+ f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
+ " disabled. To enable file online look-ups, set 'local_files_only' to False."
+ )
+ elif response is not None and response.status_code == 404:
+ raise FileNotFoundError(f"Couldn't find file at {url}")
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ if head_error is not None:
+ raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
+ elif response is not None:
+ raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
+ else:
+ raise ConnectionError(f"Couldn't reach {url}")
+
+ # Try a second time
+ filename = hash_url_to_filename(cached_url, etag)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+
+ # From now on, connected is True.
+ # Prevent parallel downloads of the same file with a lock.
+ lock_path = cache_path + ".lock"
+ with FileLock(lock_path):
+ # Retry in case previously locked processes just enter after the precedent process releases the lock
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+
+ incomplete_path = cache_path + ".incomplete"
+
+ @contextmanager
+ def temp_file_manager(mode="w+b"):
+ with open(incomplete_path, mode) as f:
+ yield f
+
+ resume_size = 0
+ if resume_download:
+ temp_file_manager = partial(temp_file_manager, mode="a+b")
+ if os.path.exists(incomplete_path):
+ resume_size = os.stat(incomplete_path).st_size
+
+ # Download to temporary file, then copy to cache path once finished.
+ # Otherwise, you get corrupt cache entries if the download gets interrupted.
+ with temp_file_manager() as temp_file:
+ logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
+
+ # GET file object
+ if scheme == "ftp":
+ ftp_get(url, temp_file)
+ elif scheme not in ("http", "https"):
+ fsspec_get(
+ url, temp_file, storage_options=storage_options, desc=download_desc, disable_tqdm=disable_tqdm
+ )
+ else:
+ http_get(
+ url,
+ temp_file=temp_file,
+ proxies=proxies,
+ resume_size=resume_size,
+ headers=headers,
+ cookies=cookies,
+ max_retries=max_retries,
+ desc=download_desc,
+ disable_tqdm=disable_tqdm,
+ )
+
+ logger.info(f"storing {url} in cache at {cache_path}")
+ shutil.move(temp_file.name, cache_path)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(cache_path, 0o666 & ~umask)
+
+ logger.info(f"creating metadata file for {cache_path}")
+ meta = {"url": url, "etag": etag}
+ meta_path = cache_path + ".json"
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ return cache_path
+
+
+def add_start_docstrings(*docstr):
+ def docstring_decorator(fn):
+ fn.__doc__ = "".join(docstr) + "\n\n" + (fn.__doc__ if fn.__doc__ is not None else "")
+ return fn
+
+ return docstring_decorator
+
+
+def add_end_docstrings(*docstr):
+ def docstring_decorator(fn):
+ fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "\n\n" + "".join(docstr)
+ return fn
+
+ return docstring_decorator
+
+
+def estimate_dataset_size(paths):
+ return sum(path.stat().st_size for path in paths)
+
+
+def readline(f: io.RawIOBase):
+ # From: https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
+ res = bytearray()
+ while True:
+ b = f.read(1)
+ if not b:
+ break
+ res += b
+ if res.endswith(b"\n"):
+ break
+ return bytes(res)
+
+
+#######################
+# Streaming utilities #
+#######################
+
+BASE_KNOWN_EXTENSIONS = [
+ "txt",
+ "csv",
+ "json",
+ "jsonl",
+ "tsv",
+ "conll",
+ "conllu",
+ "orig",
+ "parquet",
+ "pkl",
+ "pickle",
+ "rel",
+ "xml",
+]
+COMPRESSION_EXTENSION_TO_PROTOCOL = {
+ # single file compression
+ **{fs_class.extension.lstrip("."): fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS},
+ # archive compression
+ "zip": "zip",
+}
+SINGLE_FILE_COMPRESSION_PROTOCOLS = {fs_class.protocol for fs_class in COMPRESSION_FILESYSTEMS}
+SINGLE_SLASH_AFTER_PROTOCOL_PATTERN = re.compile(r"(?<!:):/")
+
+MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL = {
+ bytes.fromhex("504B0304"): "zip",
+ bytes.fromhex("504B0506"): "zip",  # empty archive
+ bytes.fromhex("504B0708"): "zip",  # spanned archive
+ bytes.fromhex("425A68"): "bz2",
+ bytes.fromhex("1F8B"): "gzip",
+ bytes.fromhex("FD377A585A00"): "xz",
+ bytes.fromhex("04224D18"): "lz4",
+ bytes.fromhex("28B52FFD"): "zstd",
+}
+MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL = {
+ b"Rar!": "rar",
+ bytes.fromhex("377ABCAF271C"): "7z",
+}
+MAGIC_NUMBER_MAX_LENGTH = max(
+ len(magic_number)
+ for magic_number in chain(MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL, MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL)
+)
+
+
+class NonStreamableDatasetError(Exception):
+ pass
+
+
+def _get_path_extension(path: str) -> str:
+ # Get extension: https://foo.bar/train.json.gz -> gz
+ extension = path.split(".")[-1]
+ # Remove query params ("dl=1", "raw=true"): gz?dl=1 -> gz
+ # Remove shards infos (".txt_1", ".txt-00000-of-00100"): txt_1 -> txt
+ for symb in "?-_":
+ extension = extension.split(symb)[0]
+ return extension
+
+
+def _get_extraction_protocol_with_magic_number(f) -> Optional[str]:
+ """read the magic number from a file-like object and return the compression protocol"""
+ # Check if the file object is seekable even before reading the magic number (to avoid https://bugs.python.org/issue26440)
+ try:
+ f.seek(0)
+ except (AttributeError, io.UnsupportedOperation):
+ return None
+ magic_number = f.read(MAGIC_NUMBER_MAX_LENGTH)
+ f.seek(0)
+ for i in range(MAGIC_NUMBER_MAX_LENGTH):
+ compression = MAGIC_NUMBER_TO_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
+ if compression is not None:
+ return compression
+ compression = MAGIC_NUMBER_TO_UNSUPPORTED_COMPRESSION_PROTOCOL.get(magic_number[: MAGIC_NUMBER_MAX_LENGTH - i])
+ if compression is not None:
+ raise NotImplementedError(f"Compression protocol '{compression}' not implemented.")
+
+
+def _get_extraction_protocol(urlpath: str, download_config: Optional[DownloadConfig] = None) -> Optional[str]:
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
+ urlpath = str(urlpath)
+ path = urlpath.split("::")[0]
+ extension = _get_path_extension(path)
+ if (
+ extension in BASE_KNOWN_EXTENSIONS
+ or extension in ["tgz", "tar"]
+ or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz"))
+ ):
+ return None
+ elif extension in COMPRESSION_EXTENSION_TO_PROTOCOL:
+ return COMPRESSION_EXTENSION_TO_PROTOCOL[extension]
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+ try:
+ with fsspec.open(urlpath, **(storage_options or {})) as f:
+ return _get_extraction_protocol_with_magic_number(f)
+ except FileNotFoundError:
+ if urlpath.startswith(config.HF_ENDPOINT):
+ raise FileNotFoundError(
+ urlpath + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
+ ) from None
+ else:
+ raise
+
+
+def xjoin(a, *p):
+ """
+ This function extends os.path.join to support the "::" hop separator. It supports both paths and urls.
+
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+ This is used to access files inside a zip file over http for example.
+
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way:
+
+ zip://folder1/file.txt::https://host.com/archive.zip
+
+ The xjoin function allows you to apply the join on the first path of the chain.
+
+ Example::
+
+ >>> xjoin("zip://folder1::https://host.com/archive.zip", "file.txt")
+ zip://folder1/file.txt::https://host.com/archive.zip
+ """
+ a, *b = str(a).split("::")
+ if is_local_path(a):
+ return os.path.join(a, *p)
+ else:
+ a = posixpath.join(a, *p)
+ return "::".join([a] + b)
+
+
+def xdirname(a):
+ """
+ This function extends os.path.dirname to support the "::" hop separator. It supports both paths and urls.
+
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+ This is used to access files inside a zip file over http for example.
+
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way:
+
+ zip://folder1/file.txt::https://host.com/archive.zip
+
+ The xdirname function allows you to apply the dirname on the first path of the chain.
+
+ Example::
+
+ >>> xdirname("zip://folder1/file.txt::https://host.com/archive.zip")
+ zip://folder1::https://host.com/archive.zip
+ """
+ a, *b = str(a).split("::")
+ if is_local_path(a):
+ a = os.path.dirname(Path(a).as_posix())
+ else:
+ a = posixpath.dirname(a)
+ # if we end up at the root of the protocol, we get for example a = 'http:'
+ # so we have to fix it by adding the '//' that was removed:
+ if a.endswith(":"):
+ a += "//"
+ return "::".join([a] + b)
+
+
+def xexists(urlpath: str, download_config: Optional[DownloadConfig] = None):
+ """Extend `os.path.exists` function to support both local and remote files.
+
+ Args:
+ urlpath (`str`): URL path.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `bool`
+ """
+
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
+ if is_local_path(main_hop):
+ return os.path.exists(main_hop)
+ else:
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+ main_hop, *rest_hops = urlpath.split("::")
+ fs, *_ = url_to_fs(urlpath, **storage_options)
+ return fs.exists(main_hop)
+
+
+def xbasename(a):
+ """
+ This function extends os.path.basename to support the "::" hop separator. It supports both paths and urls.
+
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+ This is used to access files inside a zip file over http for example.
+
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way:
+
+ zip://folder1/file.txt::https://host.com/archive.zip
+
+ The xbasename function allows you to apply the basename on the first path of the chain.
+
+ Example::
+
+ >>> xbasename("zip://folder1/file.txt::https://host.com/archive.zip")
+ file.txt
+ """
+ a, *b = str(a).split("::")
+ if is_local_path(a):
+ return os.path.basename(Path(a).as_posix())
+ else:
+ return posixpath.basename(a)
+
+
+def xsplit(a):
+ """
+ This function extends os.path.split to support the "::" hop separator. It supports both paths and urls.
+
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+ This is used to access files inside a zip file over http for example.
+
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way:
+
+ zip://folder1/file.txt::https://host.com/archive.zip
+
+ The xsplit function allows you to apply the xsplit on the first path of the chain.
+
+ Example::
+
+ >>> xsplit("zip://folder1/file.txt::https://host.com/archive.zip")
+ ('zip://folder1::https://host.com/archive.zip', 'file.txt')
+ """
+ a, *b = str(a).split("::")
+ if is_local_path(a):
+ return os.path.split(Path(a).as_posix())
+ else:
+ a, tail = posixpath.split(a)
+ return "::".join([a + "//" if a.endswith(":") else a] + b), tail
+
+
+def xsplitext(a):
+ """
+ This function extends os.path.splitext to support the "::" hop separator. It supports both paths and urls.
+
+ A shorthand, particularly useful where you have multiple hops, is to “chain” the URLs with the special separator "::".
+ This is used to access files inside a zip file over http for example.
+
+ Let's say you have a zip file at https://host.com/archive.zip, and you want to access the file inside the zip file at /folder1/file.txt.
+ Then you can just chain the url this way:
+
+ zip://folder1/file.txt::https://host.com/archive.zip
+
+ The xsplitext function allows you to apply the splitext on the first path of the chain.
+
+ Example::
+
+ >>> xsplitext("zip://folder1/file.txt::https://host.com/archive.zip")
+ ('zip://folder1/file::https://host.com/archive.zip', '.txt')
+ """
+ a, *b = str(a).split("::")
+ if is_local_path(a):
+ return os.path.splitext(Path(a).as_posix())
+ else:
+ a, ext = posixpath.splitext(a)
+ return "::".join([a] + b), ext
+
+
+def xisfile(path, download_config: Optional[DownloadConfig] = None) -> bool:
+ """Extend `os.path.isfile` function to support remote files.
+
+ Args:
+ path (`str`): URL path.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `bool`
+ """
+ main_hop, *rest_hops = str(path).split("::")
+ if is_local_path(main_hop):
+ return os.path.isfile(path)
+ else:
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
+ main_hop, *rest_hops = path.split("::")
+ fs, *_ = url_to_fs(path, **storage_options)
+ return fs.isfile(main_hop)
+
+
+def xgetsize(path, download_config: Optional[DownloadConfig] = None) -> int:
+ """Extend `os.path.getsize` function to support remote files.
+
+ Args:
+ path (`str`): URL path.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `int`: optional
+ """
+ main_hop, *rest_hops = str(path).split("::")
+ if is_local_path(main_hop):
+ return os.path.getsize(path)
+ else:
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
+ main_hop, *rest_hops = path.split("::")
+ fs, *_ = url_to_fs(path, **storage_options)
+ try:
+ size = fs.size(main_hop)
+ except EntryNotFoundError:
+ raise FileNotFoundError(f"No such file: {path}")
+ if size is None:
+ # use xopen instead of fs.open to make data fetching more robust
+ with xopen(path, download_config=download_config) as f:
+ size = len(f.read())
+ return size
+
+
+def xisdir(path, download_config: Optional[DownloadConfig] = None) -> bool:
+ """Extend `os.path.isdir` function to support remote files.
+
+ Args:
+ path (`str`): URL path.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `bool`
+ """
+ main_hop, *rest_hops = str(path).split("::")
+ if is_local_path(main_hop):
+ return os.path.isdir(path)
+ else:
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
+ main_hop, *rest_hops = path.split("::")
+ fs, *_ = url_to_fs(path, **storage_options)
+ inner_path = main_hop.split("://")[-1]
+ if not inner_path.strip("/"):
+ return True
+ return fs.isdir(inner_path)
+
+
+def xrelpath(path, start=None):
+ """Extend `os.path.relpath` function to support remote files.
+
+ Args:
+ path (`str`): URL path.
+ start (`str`): Start URL directory path.
+
+ Returns:
+ `str`
+ """
+ main_hop, *rest_hops = str(path).split("::")
+ if is_local_path(main_hop):
+ return os.path.relpath(main_hop, start=start) if start else os.path.relpath(main_hop)
+ else:
+ return posixpath.relpath(main_hop, start=str(start).split("::")[0]) if start else os.path.relpath(main_hop)
+
+
+def _add_retries_to_file_obj_read_method(file_obj):
+ read = file_obj.read
+ max_retries = config.STREAMING_READ_MAX_RETRIES
+
+ def read_with_retries(*args, **kwargs):
+ disconnect_err = None
+ for retry in range(1, max_retries + 1):
+ try:
+ out = read(*args, **kwargs)
+ break
+ except (ClientError, TimeoutError) as err:
+ disconnect_err = err
+ logger.warning(
+ f"Got disconnected from remote data host. Retrying in {config.STREAMING_READ_RETRY_INTERVAL}sec [{retry}/{max_retries}]"
+ )
+ time.sleep(config.STREAMING_READ_RETRY_INTERVAL)
+ else:
+ raise ConnectionError("Server Disconnected") from disconnect_err
+ return out
+
+ try:
+ file_obj.read = read_with_retries
+ except AttributeError: # read-only attribute
+ orig_file_obj = file_obj
+ file_obj = io.RawIOBase()
+ file_obj.read = read_with_retries
+ file_obj.__getattr__ = lambda _, attr: getattr(orig_file_obj, attr)
+ return file_obj
+
+
+def _prepare_path_and_storage_options(
+ urlpath: str, download_config: Optional[DownloadConfig] = None
+) -> Tuple[str, Dict[str, Dict[str, Any]]]:
+ prepared_urlpath = []
+ prepared_storage_options = {}
+ for hop in urlpath.split("::"):
+ hop, storage_options = _prepare_single_hop_path_and_storage_options(hop, download_config=download_config)
+ prepared_urlpath.append(hop)
+ prepared_storage_options.update(storage_options)
+ return "::".join(prepared_urlpath), storage_options
+
+
+def _prepare_single_hop_path_and_storage_options(
+ urlpath: str, download_config: Optional[DownloadConfig] = None
+) -> Tuple[str, Dict[str, Dict[str, Any]]]:
+ """
+ Prepare the URL and the kwargs that must be passed to the HttpFileSystem or to requests.get/head
+
+ In particular it resolves google drive URLs
+ It also adds the authentication headers for the Hugging Face Hub, for both https:// and hf:// paths.
+
+ Storage options are formatted in the form {protocol: storage_options_for_protocol}
+ """
+ token = None if download_config is None else download_config.token
+ if urlpath.startswith(config.HF_ENDPOINT) and "/resolve/" in urlpath:
+ urlpath = "hf://" + urlpath[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
+ protocol = urlpath.split("://")[0] if "://" in urlpath else "file"
+ if download_config is not None and protocol in download_config.storage_options:
+ storage_options = download_config.storage_options[protocol]
+ elif download_config is not None and protocol not in download_config.storage_options:
+ storage_options = {
+ option_name: option_value
+ for option_name, option_value in download_config.storage_options.items()
+ if option_name not in fsspec.available_protocols()
+ }
+ else:
+ storage_options = {}
+ if storage_options:
+ storage_options = {protocol: storage_options}
+ if protocol in ["http", "https"]:
+ storage_options[protocol] = {
+ "headers": {
+ **get_authentication_headers_for_url(urlpath, token=token),
+ "user-agent": get_datasets_user_agent(),
+ },
+ "client_kwargs": {"trust_env": True}, # Enable reading proxy env variables.
+ **(storage_options.get(protocol, {})),
+ }
+ if "drive.google.com" in urlpath:
+ response = http_head(urlpath)
+ cookies = None
+ for k, v in response.cookies.items():
+ if k.startswith("download_warning"):
+ urlpath += "&confirm=" + v
+ cookies = response.cookies
+ storage_options[protocol] = {"cookies": cookies, **storage_options.get(protocol, {})}
+ # Fix Google Drive URL to avoid Virus scan warning
+ if "drive.google.com" in urlpath and "confirm=" not in urlpath:
+ urlpath += "&confirm=t"
+ if urlpath.startswith("https://raw.githubusercontent.com/"):
+ # Workaround for served data with gzip content-encoding: https://github.com/fsspec/filesystem_spec/issues/389
+ storage_options[protocol]["headers"]["Accept-Encoding"] = "identity"
+ elif protocol == "hf":
+ storage_options[protocol] = {
+ "token": token,
+ "endpoint": config.HF_ENDPOINT,
+ **storage_options.get(protocol, {}),
+ }
+ # streaming with block_size=0 is only implemented in 0.21 (see https://github.com/huggingface/huggingface_hub/pull/1967)
+ if config.HF_HUB_VERSION < version.parse("0.21.0"):
+ storage_options[protocol]["block_size"] = "default"
+ return urlpath, storage_options
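+
+# Descriptive note: the returned options are keyed by protocol, e.g. for an https:// URL roughly
+# {"https": {"headers": {<auth headers>, "user-agent": ...}, "client_kwargs": {"trust_env": True}}}
+# and for an hf:// path {"hf": {"token": ..., "endpoint": config.HF_ENDPOINT}}.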
+
+
+def xopen(file: str, mode="r", *args, download_config: Optional[DownloadConfig] = None, **kwargs):
+ """Extend `open` function to support remote files using `fsspec`.
+
+ It also has a retry mechanism in case connection fails.
+ The `args` and `kwargs` are passed to `fsspec.open`, except `token` which is used for queries to private repos on huggingface.co
+
+ Args:
+ file (`str`): Path name of the file to be opened.
+ mode (`str`, *optional*, default "r"): Mode in which the file is opened.
+ *args: Arguments to be passed to `fsspec.open`.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+ **kwargs: Keyword arguments to be passed to `fsspec.open`.
+
+ Returns:
+ file object
+ """
+ # This works as well for `xopen(str(Path(...)))`
+ file_str = _as_str(file)
+ main_hop, *rest_hops = file_str.split("::")
+ if is_local_path(main_hop):
+ # ignore fsspec-specific kwargs
+ kwargs.pop("block_size", None)
+ return open(main_hop, mode, *args, **kwargs)
+ # add headers and cookies for authentication on the HF Hub and for Google Drive
+ file, storage_options = _prepare_path_and_storage_options(file_str, download_config=download_config)
+ kwargs = {**kwargs, **(storage_options or {})}
+ try:
+ file_obj = fsspec.open(file, mode=mode, *args, **kwargs).open()
+ except ValueError as e:
+ if str(e) == "Cannot seek streaming HTTP file":
+ raise NonStreamableDatasetError(
+ "Streaming is not possible for this dataset because data host server doesn't support HTTP range "
+ "requests. You can still load this dataset in non-streaming mode by passing `streaming=False` (default)"
+ ) from e
+ else:
+ raise
+ except FileNotFoundError:
+ if file.startswith(config.HF_ENDPOINT):
+ raise FileNotFoundError(
+ file + "\nIf the repo is private or gated, make sure to log in with `huggingface-cli login`."
+ ) from None
+ else:
+ raise
+ file_obj = _add_retries_to_file_obj_read_method(file_obj)
+ return file_obj
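+
+# Illustrative usage (sketch, reusing the archive URL from the docstrings above):
+#
+# with xopen("zip://folder1/file.txt::https://host.com/archive.zip", "r") as f:
+#     first_line = f.readline()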
+
+
+def xlistdir(path: str, download_config: Optional[DownloadConfig] = None) -> List[str]:
+ """Extend `os.listdir` function to support remote files.
+
+ Args:
+ path (`str`): URL path.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `list` of `str`
+ """
+ main_hop, *rest_hops = _as_str(path).split("::")
+ if is_local_path(main_hop):
+ return os.listdir(path)
+ else:
+ # globbing inside a zip in a private repo requires authentication
+ path, storage_options = _prepare_path_and_storage_options(path, download_config=download_config)
+ main_hop, *rest_hops = path.split("::")
+ fs, *_ = url_to_fs(path, **storage_options)
+ inner_path = main_hop.split("://")[-1]
+ if inner_path.strip("/") and not fs.isdir(inner_path):
+ raise FileNotFoundError(f"Directory doesn't exist: {path}")
+ paths = fs.listdir(inner_path, detail=False)
+ return [os.path.basename(path.rstrip("/")) for path in paths]
+
+
+def xglob(urlpath, *, recursive=False, download_config: Optional[DownloadConfig] = None):
+ """Extend `glob.glob` function to support remote files.
+
+ Args:
+ urlpath (`str`): URL path with shell-style wildcard patterns.
+ recursive (`bool`, default `False`): Whether to match the "**" pattern recursively to zero or more
+ directories or subdirectories.
+ download_config : mainly use token or storage_options to support different platforms and auth types.
+
+ Returns:
+ `list` of `str`
+ """
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
+ if is_local_path(main_hop):
+ return glob.glob(main_hop, recursive=recursive)
+ else:
+ # globbing inside a zip in a private repo requires authentication
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+ main_hop, *rest_hops = urlpath.split("::")
+ fs, *_ = url_to_fs(urlpath, **storage_options)
+ inner_path = main_hop.split("://")[1]
+ globbed_paths = fs.glob(inner_path)
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
+ return ["::".join([f"{protocol}://{globbed_path}"] + rest_hops) for globbed_path in globbed_paths]
+
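+# Illustrative sketch (hypothetical URL): remote globbing relies on fsspec URL chaining, so a
+# pattern inside an archive is written as "zip://<pattern>::<archive-url>" and the returned
+# matches keep the chained hops.
+#
+#   >>> xglob("zip://*.csv::https://example.com/archive.zip")
+#   ['zip://train.csv::https://example.com/archive.zip', 'zip://test.csv::https://example.com/archive.zip']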
+
+def xwalk(urlpath, download_config: Optional[DownloadConfig] = None, **kwargs):
+ """Extend `os.walk` function to support remote files.
+
+ Args:
+ urlpath (`str`): URL root path.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass the `token` or `storage_options` required by the different platforms and authentication types.
+ **kwargs: Additional keyword arguments forwarded to the underlying filesystem.
+
+ Yields:
+ `tuple`: 3-tuple (dirpath, dirnames, filenames).
+ """
+ main_hop, *rest_hops = _as_str(urlpath).split("::")
+ if is_local_path(main_hop):
+ yield from os.walk(main_hop, **kwargs)
+ else:
+ # walking inside a zip in a private repo requires authentication
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+ main_hop, *rest_hops = urlpath.split("::")
+ fs, *_ = url_to_fs(urlpath, **storage_options)
+ inner_path = main_hop.split("://")[-1]
+ if inner_path.strip("/") and not fs.isdir(inner_path):
+ return []
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[-1]
+ for dirpath, dirnames, filenames in fs.walk(inner_path, **kwargs):
+ yield "::".join([f"{protocol}://{dirpath}"] + rest_hops), dirnames, filenames
+
+
+class xPath(type(Path())):
+ """Extension of `pathlib.Path` to support both local paths and remote URLs."""
+
+ def __str__(self):
+ path_str = super().__str__()
+ main_hop, *rest_hops = path_str.split("::")
+ if is_local_path(main_hop):
+ return main_hop
+ path_as_posix = path_str.replace("\\", "/")
+ path_as_posix = SINGLE_SLASH_AFTER_PROTOCOL_PATTERN.sub("://", path_as_posix)
+ path_as_posix += "//" if path_as_posix.endswith(":") else "" # Add slashes to root of the protocol
+ return path_as_posix
+
+ def exists(self, download_config: Optional[DownloadConfig] = None):
+ """Extend `pathlib.Path.exists` method to support both local and remote files.
+
+ Args:
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass the `token` or `storage_options` required by the different platforms and authentication types.
+
+ Returns:
+ `bool`
+ """
+ return xexists(str(self), download_config=download_config)
+
+ def glob(self, pattern, download_config: Optional[DownloadConfig] = None):
+ """Glob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Args:
+ pattern (`str`): Pattern that resulting paths must match.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass the `token` or `storage_options` required by the different platforms and authentication types.
+
+ Yields:
+ [`xPath`]
+ """
+ posix_path = self.as_posix()
+ main_hop, *rest_hops = posix_path.split("::")
+ if is_local_path(main_hop):
+ yield from Path(main_hop).glob(pattern)
+ else:
+ # globbing inside a zip in a private repo requires authentication
+ if rest_hops:
+ urlpath = rest_hops[0]
+ urlpath, storage_options = _prepare_path_and_storage_options(urlpath, download_config=download_config)
+ storage_options = {urlpath.split("://")[0]: storage_options}
+ posix_path = "::".join([main_hop, urlpath, *rest_hops[1:]])
+ else:
+ storage_options = None
+ fs, *_ = url_to_fs(xjoin(posix_path, pattern), **(storage_options or {}))
+ globbed_paths = fs.glob(xjoin(main_hop, pattern))
+ for globbed_path in globbed_paths:
+ yield type(self)("::".join([f"{fs.protocol}://{globbed_path}"] + rest_hops))
+
+ def rglob(self, pattern, **kwargs):
+ """Rglob function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Args:
+ pattern (`str`): Pattern that resulting paths must match.
+
+ Yields:
+ [`xPath`]
+ """
+ return self.glob("**/" + pattern, **kwargs)
+
+ @property
+ def parent(self) -> "xPath":
+ """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Returns:
+ [`xPath`]
+ """
+ return type(self)(xdirname(self.as_posix()))
+
+ @property
+ def name(self) -> str:
+ """Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Returns:
+ `str`
+ """
+ return PurePosixPath(self.as_posix().split("::")[0]).name
+
+ @property
+ def stem(self) -> str:
+ """Stem function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Returns:
+ `str`
+ """
+ return PurePosixPath(self.as_posix().split("::")[0]).stem
+
+ @property
+ def suffix(self) -> str:
+ """Suffix function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.
+
+ Returns:
+ `str`
+ """
+ return PurePosixPath(self.as_posix().split("::")[0]).suffix
+
+ def open(self, *args, **kwargs):
+ """Extend :func:`xopen` to support argument of type :obj:`~pathlib.Path`.
+
+ Args:
+ *args: Arguments passed to :func:`fsspec.open`.
+ **kwargs: Keyword arguments passed to :func:`fsspec.open`.
+
+ Returns:
+ `io.FileIO`: File-like object.
+ """
+ return xopen(str(self), *args, **kwargs)
+
+ def joinpath(self, *p: Tuple[str, ...]) -> "xPath":
+ """Extend :func:`xjoin` to support argument of type :obj:`~pathlib.Path`.
+
+ Args:
+ *p (`tuple` of `str`): Other path components.
+
+ Returns:
+ [`xPath`]
+ """
+ return type(self)(xjoin(self.as_posix(), *p))
+
+ def __truediv__(self, p: str) -> "xPath":
+ return self.joinpath(p)
+
+ def with_suffix(self, suffix):
+ main_hop, *rest_hops = str(self).split("::")
+ if is_local_path(main_hop):
+ return type(self)(str(super().with_suffix(suffix)))
+ return type(self)("::".join([type(self)(PurePosixPath(main_hop).with_suffix(suffix)).as_posix()] + rest_hops))
+
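+# Illustrative sketch (hypothetical URL): an `xPath` can wrap a chained fsspec URL while keeping
+# the usual `pathlib`-style accessors.
+#
+#   >>> p = xPath("zip://folder/train.csv::https://example.com/archive.zip")
+#   >>> p.name, p.suffix
+#   ('train.csv', '.csv')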
+
+def _as_str(path: Union[str, Path, xPath]):
+ return str(path) if isinstance(path, xPath) else str(xPath(str(path)))
+
+
+def xgzip_open(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import gzip
+
+ if hasattr(filepath_or_buffer, "read"):
+ return gzip.open(filepath_or_buffer, *args, **kwargs)
+ else:
+ filepath_or_buffer = str(filepath_or_buffer)
+ return gzip.open(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
+
+
+def xnumpy_load(filepath_or_buffer, *args, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import numpy as np
+
+ if hasattr(filepath_or_buffer, "read"):
+ return np.load(filepath_or_buffer, *args, **kwargs)
+ else:
+ filepath_or_buffer = str(filepath_or_buffer)
+ return np.load(xopen(filepath_or_buffer, "rb", download_config=download_config), *args, **kwargs)
+
+
+def xpandas_read_csv(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import pandas as pd
+
+ if hasattr(filepath_or_buffer, "read"):
+ return pd.read_csv(filepath_or_buffer, **kwargs)
+ else:
+ filepath_or_buffer = str(filepath_or_buffer)
+ if kwargs.get("compression", "infer") == "infer":
+ kwargs["compression"] = _get_extraction_protocol(filepath_or_buffer, download_config=download_config)
+ return pd.read_csv(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
+
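+# Illustrative sketch (hypothetical URL): the x* readers only swap the file opener, so they keep
+# the wrapped library's signature; here the "gzip" compression is inferred from the extension.
+#
+#   >>> df = xpandas_read_csv("https://example.com/data/train.csv.gz")  # compression inferred as "gzip"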
+
+def xpandas_read_excel(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import pandas as pd
+
+ if hasattr(filepath_or_buffer, "read"):
+ try:
+ return pd.read_excel(filepath_or_buffer, **kwargs)
+ except ValueError: # Cannot seek streaming HTTP file
+ return pd.read_excel(BytesIO(filepath_or_buffer.read()), **kwargs)
+ else:
+ filepath_or_buffer = str(filepath_or_buffer)
+ try:
+ return pd.read_excel(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
+ except ValueError: # Cannot seek streaming HTTP file
+ return pd.read_excel(
+ BytesIO(xopen(filepath_or_buffer, "rb", download_config=download_config).read()), **kwargs
+ )
+
+
+def xpyarrow_parquet_read_table(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import pyarrow.parquet as pq
+
+ if hasattr(filepath_or_buffer, "read"):
+ return pq.read_table(filepath_or_buffer, **kwargs)
+ else:
+ filepath_or_buffer = str(filepath_or_buffer)
+ return pq.read_table(xopen(filepath_or_buffer, mode="rb", download_config=download_config), **kwargs)
+
+
+def xsio_loadmat(filepath_or_buffer, download_config: Optional[DownloadConfig] = None, **kwargs):
+ import scipy.io as sio
+
+ if hasattr(filepath_or_buffer, "read"):
+ return sio.loadmat(filepath_or_buffer, **kwargs)
+ else:
+ return sio.loadmat(xopen(filepath_or_buffer, "rb", download_config=download_config), **kwargs)
+
+
+def xet_parse(source, parser=None, download_config: Optional[DownloadConfig] = None):
+ """Extend `xml.etree.ElementTree.parse` function to support remote files.
+
+ Args:
+ source: File path or file object.
+ parser (`XMLParser`, *optional*, default `XMLParser`): Parser instance.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass the `token` or `storage_options` required by the different platforms and authentication types.
+
+ Returns:
+ `xml.etree.ElementTree.Element`: Root element of the given source document.
+ """
+ if hasattr(source, "read"):
+ return ET.parse(source, parser=parser)
+ else:
+ with xopen(source, "rb", download_config=download_config) as f:
+ return ET.parse(f, parser=parser)
+
+
+def xxml_dom_minidom_parse(filename_or_file, download_config: Optional[DownloadConfig] = None, **kwargs):
+ """Extend `xml.dom.minidom.parse` function to support remote files.
+
+ Args:
+ filename_or_file (`str` or file): File path or file object.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass the `token` or `storage_options` required by the different platforms and authentication types.
+ **kwargs (optional): Additional keyword arguments passed to `xml.dom.minidom.parse`.
+
+ Returns:
+ :obj:`xml.dom.minidom.Document`: Parsed document.
+ """
+ if hasattr(filename_or_file, "read"):
+ return xml.dom.minidom.parse(filename_or_file, **kwargs)
+ else:
+ with xopen(filename_or_file, "rb", download_config=download_config) as f:
+ return xml.dom.minidom.parse(f, **kwargs)
+
+
+class _IterableFromGenerator(TrackedIterable):
+ """Utility class to create an iterable from a generator function, in order to reset the generator when needed."""
+
+ def __init__(self, generator: Callable, *args, **kwargs):
+ super().__init__()
+ self.generator = generator
+ self.args = args
+ self.kwargs = kwargs
+
+ def __iter__(self):
+ for x in self.generator(*self.args, **self.kwargs):
+ self.last_item = x
+ yield x
+ self.last_item = None
+
+
+class ArchiveIterable(_IterableFromGenerator):
+ """An iterable of (path, fileobj) from a TAR archive, used by `iter_archive`"""
+
+ @staticmethod
+ def _iter_tar(f):
+ stream = tarfile.open(fileobj=f, mode="r|*")
+ for tarinfo in stream:
+ file_path = tarinfo.name
+ if not tarinfo.isreg():
+ continue
+ if file_path is None:
+ continue
+ if os.path.basename(file_path).startswith((".", "__")):
+ # skipping hidden files
+ continue
+ file_obj = stream.extractfile(tarinfo)
+ yield file_path, file_obj
+ stream.members = []
+ del stream
+
+ @staticmethod
+ def _iter_zip(f):
+ zipf = zipfile.ZipFile(f)
+ for member in zipf.infolist():
+ file_path = member.filename
+ if member.is_dir():
+ continue
+ if file_path is None:
+ continue
+ if os.path.basename(file_path).startswith((".", "__")):
+ # skipping hidden files
+ continue
+ file_obj = zipf.open(member)
+ yield file_path, file_obj
+
+ @classmethod
+ def _iter_from_fileobj(cls, f) -> Generator[Tuple, None, None]:
+ compression = _get_extraction_protocol_with_magic_number(f)
+ if compression == "zip":
+ yield from cls._iter_zip(f)
+ else:
+ yield from cls._iter_tar(f)
+
+ @classmethod
+ def _iter_from_urlpath(
+ cls, urlpath: str, download_config: Optional[DownloadConfig] = None
+ ) -> Generator[Tuple, None, None]:
+ compression = _get_extraction_protocol(urlpath, download_config=download_config)
+ # Set block_size=0 to get faster streaming
+ # (e.g. for hf:// and https:// it uses streaming Requests file-like instances)
+ with xopen(urlpath, "rb", download_config=download_config, block_size=0) as f:
+ if compression == "zip":
+ yield from cls._iter_zip(f)
+ else:
+ yield from cls._iter_tar(f)
+
+ @classmethod
+ def from_buf(cls, fileobj) -> "ArchiveIterable":
+ return cls(cls._iter_from_fileobj, fileobj)
+
+ @classmethod
+ def from_urlpath(cls, urlpath_or_buf, download_config: Optional[DownloadConfig] = None) -> "ArchiveIterable":
+ return cls(cls._iter_from_urlpath, urlpath_or_buf, download_config)
+
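+# Illustrative sketch (hypothetical URL): `ArchiveIterable.from_urlpath` streams (path, fileobj)
+# pairs out of a TAR or ZIP archive without extracting it to disk.
+#
+#   >>> for member_path, member_file in ArchiveIterable.from_urlpath("https://example.com/archive.tar.gz"):
+#   ...     magic_bytes = member_file.read(4)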
+
+class FilesIterable(_IterableFromGenerator):
+ """An iterable of paths from a list of directories or files"""
+
+ @classmethod
+ def _iter_from_urlpaths(
+ cls, urlpaths: Union[str, List[str]], download_config: Optional[DownloadConfig] = None
+ ) -> Generator[str, None, None]:
+ if not isinstance(urlpaths, list):
+ urlpaths = [urlpaths]
+ for urlpath in urlpaths:
+ if xisfile(urlpath, download_config=download_config):
+ yield urlpath
+ elif xisdir(urlpath, download_config=download_config):
+ for dirpath, dirnames, filenames in xwalk(urlpath, download_config=download_config):
+ # in-place modification to prune the search
+ dirnames[:] = sorted([dirname for dirname in dirnames if not dirname.startswith((".", "__"))])
+ if xbasename(dirpath).startswith((".", "__")):
+ # skipping hidden directories
+ continue
+ for filename in sorted(filenames):
+ if filename.startswith((".", "__")):
+ # skipping hidden files
+ continue
+ yield xjoin(dirpath, filename)
+ else:
+ raise FileNotFoundError(urlpath)
+
+ @classmethod
+ def from_urlpaths(cls, urlpaths, download_config: Optional[DownloadConfig] = None) -> "FilesIterable":
+ return cls(cls._iter_from_urlpaths, urlpaths, download_config)
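+# Illustrative sketch (hypothetical paths): `FilesIterable.from_urlpaths` accepts files and/or
+# directories, recurses into the directories, and skips hidden files and directories.
+#
+#   >>> for path in FilesIterable.from_urlpaths(["data/train", "data/metadata.csv"]):
+#   ...     print(path)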
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/filelock.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/filelock.py
new file mode 100644
index 0000000000000000000000000000000000000000..df0728efe644d8eb32f0e578a85e39ba366e9743
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/filelock.py
@@ -0,0 +1,11 @@
+# deprecated, please use the `filelock` package instead
+
+from filelock import ( # noqa: F401 # imported for backward compatibility TODO: remove in 3.0.0
+ BaseFileLock,
+ SoftFileLock,
+ Timeout,
+ UnixFileLock,
+ WindowsFileLock,
+)
+
+from ._filelock import FileLock # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/hub.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/hub.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d784333b23328c113ec7be444d0a77410f1b857
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/hub.py
@@ -0,0 +1,6 @@
+from functools import partial
+
+from huggingface_hub import hf_hub_url
+
+
+hf_dataset_url = partial(hf_hub_url, repo_type="dataset")
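+# Illustrative sketch: `hf_dataset_url` is `hf_hub_url` with `repo_type="dataset"` pre-filled, so
+# with the default endpoint and revision it should resolve to something like:
+#
+#   >>> hf_dataset_url("squad", "README.md")
+#   'https://huggingface.co/datasets/squad/resolve/main/README.md'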
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/info_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/info_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eaa2f0418b2200b9e6714e6697ee68efe753107
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/info_utils.py
@@ -0,0 +1,130 @@
+import enum
+import os
+from typing import Optional
+
+from huggingface_hub.utils import insecure_hashlib
+
+from .. import config
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class VerificationMode(enum.Enum):
+ """`Enum` that specifies which verification checks to run.
+
+ The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
+ when generating/downloading a dataset for the first time.
+
+ The verification modes:
+
+ | | Verification checks |
+ |---------------------------|------------------------------------------------------------------------------ |
+ | `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
+ | | and the validity (number of files, checksums, etc.) of downloaded files |
+ | `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
+ | `NO_CHECKS` | None |
+
+ """
+
+ ALL_CHECKS = "all_checks"
+ BASIC_CHECKS = "basic_checks"
+ NO_CHECKS = "no_checks"
+
+
+class ChecksumVerificationException(Exception):
+ """Exceptions during checksums verifications of downloaded files."""
+
+
+class UnexpectedDownloadedFile(ChecksumVerificationException):
+ """Some downloaded files were not expected."""
+
+
+class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
+ """Some files were supposed to be downloaded but were not."""
+
+
+class NonMatchingChecksumError(ChecksumVerificationException):
+ """The downloaded file checksum don't match the expected checksum."""
+
+
+def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
+ if expected_checksums is None:
+ logger.info("Unable to verify checksums.")
+ return
+ if len(set(expected_checksums) - set(recorded_checksums)) > 0:
+ raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
+ if len(set(recorded_checksums) - set(expected_checksums)) > 0:
+ raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
+ bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
+ for_verification_name = " for " + verification_name if verification_name is not None else ""
+ if len(bad_urls) > 0:
+ raise NonMatchingChecksumError(
+ f"Checksums didn't match{for_verification_name}:\n"
+ f"{bad_urls}\n"
+ "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
+ )
+ logger.info("All the checksums matched successfully" + for_verification_name)
+
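+# Illustrative sketch (hypothetical values): both mappings are keyed by URL and carry the
+# `num_bytes`/`checksum` entries produced by `get_size_checksum_dict` below; matching dicts pass
+# silently, mismatches raise one of the exceptions above.
+#
+#   >>> expected = {"https://example.com/a.csv": {"num_bytes": 10, "checksum": "abc"}}
+#   >>> verify_checksums(expected, dict(expected), verification_name="dataset source files")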
+
+class SplitsVerificationException(Exception):
+ """Exceptions during splis verifications"""
+
+
+class UnexpectedSplits(SplitsVerificationException):
+ """The expected splits of the downloaded file is missing."""
+
+
+class ExpectedMoreSplits(SplitsVerificationException):
+ """Some recorded splits are missing."""
+
+
+class NonMatchingSplitsSizesError(SplitsVerificationException):
+ """The splits sizes don't match the expected splits sizes."""
+
+
+def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
+ if expected_splits is None:
+ logger.info("Unable to verify splits sizes.")
+ return
+ if len(set(expected_splits) - set(recorded_splits)) > 0:
+ raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
+ if len(set(recorded_splits) - set(expected_splits)) > 0:
+ raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
+ bad_splits = [
+ {"expected": expected_splits[name], "recorded": recorded_splits[name]}
+ for name in expected_splits
+ if expected_splits[name].num_examples != recorded_splits[name].num_examples
+ ]
+ if len(bad_splits) > 0:
+ raise NonMatchingSplitsSizesError(str(bad_splits))
+ logger.info("All the splits matched successfully.")
+
+
+def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
+ """Compute the file size and the sha256 checksum of a file"""
+ if record_checksum:
+ m = insecure_hashlib.sha256()
+ with open(path, "rb") as f:
+ for chunk in iter(lambda: f.read(1 << 20), b""):
+ m.update(chunk)
+ checksum = m.hexdigest()
+ else:
+ checksum = None
+ return {"num_bytes": os.path.getsize(path), "checksum": checksum}
+
+
+def is_small_dataset(dataset_size):
+ """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
+
+ Args:
+ dataset_size (int): Dataset size in bytes.
+
+ Returns:
+ bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
+ """
+ if dataset_size and config.IN_MEMORY_MAX_SIZE:
+ return dataset_size < config.IN_MEMORY_MAX_SIZE
+ else:
+ return False
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/logging.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..dffd5ce46e0d2da5cbbfb023003c3f4caae86093
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/logging.py
@@ -0,0 +1,179 @@
+# Copyright 2020 Optuna, Hugging Face
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Logging utilities."""
+
+import logging
+import os
+from logging import (
+ CRITICAL, # NOQA
+ DEBUG, # NOQA
+ ERROR, # NOQA
+ FATAL, # NOQA
+ INFO, # NOQA
+ NOTSET, # NOQA
+ WARN, # NOQA
+ WARNING, # NOQA
+)
+from typing import Optional
+
+from .tqdm import ( # noqa: F401 # imported for backward compatibility
+ disable_progress_bar,
+ enable_progress_bar,
+ is_progress_bar_enabled,
+ tqdm,
+)
+
+
+log_levels = {
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
+}
+
+_default_log_level = logging.WARNING
+
+
+def _get_default_logging_level():
+ """
+ If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
+ If it is not, fall back to ``_default_log_level``.
+ """
+ env_level_str = os.getenv("DATASETS_VERBOSITY", None)
+ if env_level_str:
+ if env_level_str in log_levels:
+ return log_levels[env_level_str]
+ else:
+ logging.getLogger().warning(
+ f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
+ f"has to be one of: { ', '.join(log_levels.keys()) }"
+ )
+ return _default_log_level
+
+
+def _get_library_name() -> str:
+ return __name__.split(".")[0]
+
+
+def _get_library_root_logger() -> logging.Logger:
+ return logging.getLogger(_get_library_name())
+
+
+def _configure_library_root_logger() -> None:
+ # Apply our default configuration to the library root logger.
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.addHandler(logging.StreamHandler())
+ library_root_logger.setLevel(_get_default_logging_level())
+
+
+def _reset_library_root_logger() -> None:
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.setLevel(logging.NOTSET)
+
+
+def get_logger(name: Optional[str] = None) -> logging.Logger:
+ """Return a logger with the specified name.
+ This function can be used in dataset scripts.
+ """
+ if name is None:
+ name = _get_library_name()
+ return logging.getLogger(name)
+
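+# Illustrative sketch: dataset scripts typically grab a child of the library root logger and then
+# control what is shown through the verbosity helpers defined below.
+#
+#   >>> logger = get_logger(__name__)
+#   >>> set_verbosity(WARNING)
+#   >>> logger.info("this is hidden at WARNING verbosity")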
+
+def get_verbosity() -> int:
+ """Return the current level for the HuggingFace datasets library's root logger.
+ Returns:
+ Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
+
+
+
+ The Hugging Face Datasets library has the following logging levels:
+ - `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
+ - `datasets.logging.ERROR`
+ - `datasets.logging.WARNING`, `datasets.logging.WARN`
+ - `datasets.logging.INFO`
+ - `datasets.logging.DEBUG`
+
+
+ """
+ return _get_library_root_logger().getEffectiveLevel()
+
+
+def set_verbosity(verbosity: int) -> None:
+ """Set the level for the Hugging Face Datasets library's root logger.
+ Args:
+ verbosity:
+ Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
+ """
+ _get_library_root_logger().setLevel(verbosity)
+
+
+def set_verbosity_info():
+ """Set the level for the Hugging Face datasets library's root logger to `INFO`.
+
+ This will display most of the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
+ """
+ return set_verbosity(INFO)
+
+
+def set_verbosity_warning():
+ """Set the level for the Hugging Face datasets library's root logger to `WARNING`.
+
+ This will display only the warning and errors logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
+ """
+ return set_verbosity(WARNING)
+
+
+def set_verbosity_debug():
+ """Set the level for the Hugging Face datasets library's root logger to `DEBUG`.
+
+ This will display all the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
+ """
+ return set_verbosity(DEBUG)
+
+
+def set_verbosity_error():
+ """Set the level for the Hugging Face datasets library's root logger to `ERROR`.
+
+ This will display only the errors logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
+ """
+ return set_verbosity(ERROR)
+
+
+def disable_propagation() -> None:
+ """Disable propagation of the library log outputs.
+ Note that log propagation is disabled by default.
+ """
+ _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+ """Enable propagation of the library log outputs.
+ Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has
+ been configured.
+ """
+ _get_library_root_logger().propagate = True
+
+
+# Configure the library root logger at the module level (singleton-like)
+_configure_library_root_logger()
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/metadata.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2e763e9cb6a150db3c7a6a46b0f60bd7f799b1e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/metadata.py
@@ -0,0 +1,320 @@
+import re
+import textwrap
+from collections import Counter
+from itertools import groupby
+from operator import itemgetter
+from pathlib import Path
+from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
+
+import yaml
+from huggingface_hub import DatasetCardData
+
+from ..config import METADATA_CONFIGS_FIELD
+from ..info import DatasetInfo, DatasetInfosDict
+from ..naming import _split_re
+from ..utils.logging import get_logger
+from .deprecation_utils import deprecated
+
+
+logger = get_logger(__name__)
+
+
+class _NoDuplicateSafeLoader(yaml.SafeLoader):
+ def _check_no_duplicates_on_constructed_node(self, node):
+ keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
+ keys = [tuple(key) if isinstance(key, list) else key for key in keys]
+ counter = Counter(keys)
+ duplicate_keys = [key for key in counter if counter[key] > 1]
+ if duplicate_keys:
+ raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
+
+ def construct_mapping(self, node, deep=False):
+ mapping = super().construct_mapping(node, deep=deep)
+ self._check_no_duplicates_on_constructed_node(node)
+ return mapping
+
+
+def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
+ full_content = list(readme_content.splitlines())
+ if full_content and full_content[0] == "---" and "---" in full_content[1:]:
+ sep_idx = full_content[1:].index("---") + 1
+ yamlblock = "\n".join(full_content[1:sep_idx])
+ return yamlblock, "\n".join(full_content[sep_idx + 1 :])
+
+ return None, "\n".join(full_content)
+
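+# Illustrative sketch: the YAML block is the "---"-delimited front matter at the top of the
+# dataset card, and the helper returns it separately from the rest of the README.
+#
+#   >>> _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
+#   ('license: mit', '# My dataset')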
+
+@deprecated("Use `huggingface_hub.DatasetCardData` instead.")
+class DatasetMetadata(dict):
+ # class attributes
+ _FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
+
+ @classmethod
+ def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata":
+ """Loads and validates the dataset metadata from its dataset card (README.md)
+
+ Args:
+ path (:obj:`Path`): Path to the dataset card (its README.md file)
+
+ Returns:
+ :class:`DatasetMetadata`: The dataset's metadata
+
+ Raises:
+ :obj:`TypeError`: If the dataset's metadata is invalid
+ """
+ with open(path, encoding="utf-8") as readme_file:
+ yaml_string, _ = _split_yaml_from_readme(readme_file.read())
+ if yaml_string is not None:
+ return cls.from_yaml_string(yaml_string)
+ else:
+ return cls()
+
+ def to_readme(self, path: Path):
+ if path.exists():
+ with open(path, encoding="utf-8") as readme_file:
+ readme_content = readme_file.read()
+ else:
+ readme_content = None
+ updated_readme_content = self._to_readme(readme_content)
+ with open(path, "w", encoding="utf-8") as readme_file:
+ readme_file.write(updated_readme_content)
+
+ def _to_readme(self, readme_content: Optional[str] = None) -> str:
+ if readme_content is not None:
+ _, content = _split_yaml_from_readme(readme_content)
+ full_content = "---\n" + self.to_yaml_string() + "---\n" + content
+ else:
+ full_content = "---\n" + self.to_yaml_string() + "---\n"
+ return full_content
+
+ @classmethod
+ def from_yaml_string(cls, string: str) -> "DatasetMetadata":
+ """Loads and validates the dataset metadata from a YAML string
+
+ Args:
+ string (:obj:`str`): The YAML string
+
+ Returns:
+ :class:`DatasetMetadata`: The dataset's metadata
+
+ Raises:
+ :obj:`TypeError`: If the dataset's metadata is invalid
+ """
+ metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
+
+ # Convert the YAML keys to DatasetMetadata fields
+ metadata_dict = {
+ (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
+ for key, value in metadata_dict.items()
+ }
+ return cls(**metadata_dict)
+
+ def to_yaml_string(self) -> str:
+ return yaml.safe_dump(
+ {
+ (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
+ for key, value in self.items()
+ },
+ sort_keys=False,
+ allow_unicode=True,
+ encoding="utf-8",
+ ).decode("utf-8")
+
+
+class MetadataConfigs(Dict[str, Dict[str, Any]]):
+ """Should be in format {config_name: {**config_params}}."""
+
+ FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
+
+ @staticmethod
+ def _raise_if_data_files_field_not_valid(metadata_config: dict):
+ yaml_data_files = metadata_config.get("data_files")
+ if yaml_data_files is not None:
+ yaml_error_message = textwrap.dedent(
+ f"""
+ Expected data_files in YAML to be either a string or a list of strings
+ or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
+ Examples of data_files in YAML:
+
+ data_files: data.csv
+
+ data_files: data/*.png
+
+ data_files:
+ - part0/*
+ - part1/*
+
+ data_files:
+ - split: train
+ path: train/*
+ - split: test
+ path: test/*
+
+ data_files:
+ - split: train
+ path:
+ - train/part1/*
+ - train/part2/*
+ - split: test
+ path: test/*
+
+ PS: some symbols like dashes '-' are not allowed in split names
+ """
+ )
+ if not isinstance(yaml_data_files, (list, str)):
+ raise ValueError(yaml_error_message)
+ if isinstance(yaml_data_files, list):
+ for yaml_data_files_item in yaml_data_files:
+ if (
+ not isinstance(yaml_data_files_item, (str, dict))
+ or isinstance(yaml_data_files_item, dict)
+ and not (
+ len(yaml_data_files_item) == 2
+ and "split" in yaml_data_files_item
+ and re.match(_split_re, yaml_data_files_item["split"])
+ and isinstance(yaml_data_files_item.get("path"), (str, list))
+ )
+ ):
+ raise ValueError(yaml_error_message)
+
+ @classmethod
+ def _from_exported_parquet_files_and_dataset_infos(
+ cls,
+ revision: str,
+ exported_parquet_files: List[Dict[str, Any]],
+ dataset_infos: DatasetInfosDict,
+ ) -> "MetadataConfigs":
+ metadata_configs = {
+ config_name: {
+ "data_files": [
+ {
+ "split": split_name,
+ "path": [
+ parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision)
+ for parquet_file in parquet_files_for_split
+ ],
+ }
+ for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
+ ],
+ "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
+ }
+ for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
+ }
+ if dataset_infos:
+ # Preserve order of configs and splits
+ metadata_configs = {
+ config_name: {
+ "data_files": [
+ data_file
+ for split_name in dataset_info.splits
+ for data_file in metadata_configs[config_name]["data_files"]
+ if data_file["split"] == split_name
+ ],
+ "version": metadata_configs[config_name]["version"],
+ }
+ for config_name, dataset_info in dataset_infos.items()
+ }
+ return cls(metadata_configs)
+
+ @classmethod
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
+ if dataset_card_data.get(cls.FIELD_NAME):
+ metadata_configs = dataset_card_data[cls.FIELD_NAME]
+ if not isinstance(metadata_configs, list):
+ raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
+ for metadata_config in metadata_configs:
+ if "config_name" not in metadata_config:
+ raise ValueError(
+ f"Each config must include `config_name` field with a string name of a config, "
+ f"but got {metadata_config}. "
+ )
+ cls._raise_if_data_files_field_not_valid(metadata_config)
+ return cls(
+ {
+ config["config_name"]: {param: value for param, value in config.items() if param != "config_name"}
+ for config in metadata_configs
+ }
+ )
+ return cls()
+
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+ if self:
+ for metadata_config in self.values():
+ self._raise_if_data_files_field_not_valid(metadata_config)
+ current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
+ total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
+ for config_name, config_metadata in total_metadata_configs.items():
+ config_metadata.pop("config_name", None)
+ dataset_card_data[self.FIELD_NAME] = [
+ {"config_name": config_name, **config_metadata}
+ for config_name, config_metadata in total_metadata_configs.items()
+ ]
+
+ def get_default_config_name(self) -> Optional[str]:
+ default_config_name = None
+ for config_name, metadata_config in self.items():
+ if len(self) == 1 or config_name == "default" or metadata_config.get("default"):
+ if default_config_name is None:
+ default_config_name = config_name
+ else:
+ raise ValueError(
+ f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
+ )
+ return default_config_name
+
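+# Illustrative sketch (assuming the YAML field name is the `configs` list used on dataset cards):
+# `from_dataset_card_data` turns entries like
+#
+#   configs:
+#   - config_name: default
+#     data_files:
+#     - split: train
+#       path: data/train-*
+#
+# into {"default": {"data_files": [{"split": "train", "path": "data/train-*"}]}}.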
+
+# DEPRECATED - just here to support old versions of evaluate like 0.2.2
+# To support new tasks on the Hugging Face Hub, please open a PR for this file:
+# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
+known_task_ids = {
+ "image-classification": [],
+ "translation": [],
+ "image-segmentation": [],
+ "fill-mask": [],
+ "automatic-speech-recognition": [],
+ "token-classification": [],
+ "sentence-similarity": [],
+ "audio-classification": [],
+ "question-answering": [],
+ "summarization": [],
+ "zero-shot-classification": [],
+ "table-to-text": [],
+ "feature-extraction": [],
+ "other": [],
+ "multiple-choice": [],
+ "text-classification": [],
+ "text-to-image": [],
+ "text2text-generation": [],
+ "zero-shot-image-classification": [],
+ "tabular-classification": [],
+ "tabular-regression": [],
+ "image-to-image": [],
+ "tabular-to-text": [],
+ "unconditional-image-generation": [],
+ "text-retrieval": [],
+ "text-to-speech": [],
+ "object-detection": [],
+ "audio-to-audio": [],
+ "text-generation": [],
+ "conversational": [],
+ "table-question-answering": [],
+ "visual-question-answering": [],
+ "image-to-text": [],
+ "reinforcement-learning": [],
+ "voice-activity-detection": [],
+ "time-series-forecasting": [],
+ "document-question-answering": [],
+}
+
+
+if __name__ == "__main__":
+ from argparse import ArgumentParser
+
+ ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
+ ap.add_argument("readme_filepath")
+ args = ap.parse_args()
+
+ readme_filepath = Path(args.readme_filepath)
+ dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
+ print(dataset_metadata)
+ dataset_metadata.to_readme(readme_filepath)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/patching.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/patching.py
new file mode 100644
index 0000000000000000000000000000000000000000..f245cabd97065d9e82a1320d02999f9ec03bda36
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/patching.py
@@ -0,0 +1,119 @@
+from importlib import import_module
+
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class _PatchedModuleObj:
+ """Set all the modules components as attributes of the _PatchedModuleObj object."""
+
+ def __init__(self, module, attrs=None):
+ attrs = attrs or []
+ if module is not None:
+ for key in module.__dict__:
+ if key in attrs or not key.startswith("__"):
+ setattr(self, key, getattr(module, key))
+ self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
+
+
+class patch_submodule:
+ """
+ Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.
+
+ Example::
+
+ >>> import importlib
+ >>> from datasets.load import dataset_module_factory
+ >>> from datasets.streaming import patch_submodule, xjoin
+ >>>
+ >>> dataset_module = dataset_module_factory("snli")
+ >>> snli_module = importlib.import_module(dataset_module.module_path)
+ >>> patcher = patch_submodule(snli_module, "os.path.join", xjoin)
+ >>> patcher.start()
+ >>> assert snli_module.os.path.join is xjoin
+ """
+
+ _active_patches = []
+
+ def __init__(self, obj, target: str, new, attrs=None):
+ self.obj = obj
+ self.target = target
+ self.new = new
+ self.key = target.split(".")[0]
+ self.original = {}
+ self.attrs = attrs or []
+
+ def __enter__(self):
+ *submodules, target_attr = self.target.split(".")
+
+ # Patch modules:
+ # it's used to patch attributes of submodules like "os.path.join";
+ # in this case we need to patch "os" and "os.path"
+
+ for i in range(len(submodules)):
+ try:
+ submodule = import_module(".".join(submodules[: i + 1]))
+ except ModuleNotFoundError:
+ continue
+ # We iterate over all the globals in self.obj in case we find "os" or "os.path"
+ for attr in self.obj.__dir__():
+ obj_attr = getattr(self.obj, attr)
+ # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
+ # This allows to patch renamed modules like "from os import path as ospath".
+ if obj_attr is submodule or (
+ isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
+ ):
+ self.original[attr] = obj_attr
+ # patch at top level
+ setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
+ patched = getattr(self.obj, attr)
+ # construct lower levels patches
+ for key in submodules[i + 1 :]:
+ setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
+ patched = getattr(patched, key)
+ # finally set the target attribute
+ setattr(patched, target_attr, self.new)
+
+ # Patch attribute itself:
+ # it's used for builtins like "open",
+ # and also to patch "os.path.join" we may also need to patch "join"
+ # itself if it was imported as "from os.path import join".
+
+ if submodules: # if it's an attribute of a submodule like "os.path.join"
+ try:
+ attr_value = getattr(import_module(".".join(submodules)), target_attr)
+ except (AttributeError, ModuleNotFoundError):
+ return
+ # We iterate over all the globals in self.obj in case we find "os.path.join"
+ for attr in self.obj.__dir__():
+ # We don't check for the name of the global, but rather if its value *is* "os.path.join".
+ # This allows to patch renamed attributes like "from os.path import join as pjoin".
+ if getattr(self.obj, attr) is attr_value:
+ self.original[attr] = getattr(self.obj, attr)
+ setattr(self.obj, attr, self.new)
+ elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
+ self.original[target_attr] = globals()["__builtins__"][target_attr]
+ setattr(self.obj, target_attr, self.new)
+ else:
+ raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
+
+ def __exit__(self, *exc_info):
+ for attr in list(self.original):
+ setattr(self.obj, attr, self.original.pop(attr))
+
+ def start(self):
+ """Activate a patch."""
+ self.__enter__()
+ self._active_patches.append(self)
+
+ def stop(self):
+ """Stop an active patch."""
+ try:
+ self._active_patches.remove(self)
+ except ValueError:
+ # If the patch hasn't been started this will fail
+ return None
+
+ return self.__exit__()
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/py_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/py_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..97dd6fecf523e96b1ff28b62a7c4316b12502239
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/py_utils.py
@@ -0,0 +1,734 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Some python utils function and classes."""
+
+import copy
+import functools
+import itertools
+import multiprocessing.pool
+import os
+import queue
+import re
+import types
+import warnings
+from contextlib import contextmanager
+from dataclasses import fields, is_dataclass
+from multiprocessing import Manager
+from pathlib import Path
+from queue import Empty
+from shutil import disk_usage
+from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union
+from urllib.parse import urlparse
+
+import multiprocess
+import multiprocess.pool
+import numpy as np
+from tqdm.auto import tqdm
+
+from .. import config
+from ..parallel import parallel_map
+from . import logging
+from . import tqdm as hf_tqdm
+from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
+ Pickler,
+ dump,
+ dumps,
+ pklregister,
+)
+from ._filelock import FileLock
+
+
+try: # pragma: no branch
+ import typing_extensions as _typing_extensions
+ from typing_extensions import Final, Literal
+except ImportError:
+ _typing_extensions = Literal = Final = None
+
+
+logger = logging.get_logger(__name__)
+
+
+# NOTE: When used on an instance method, the cache is shared across all
+# instances and IS NOT per-instance.
+# See
+# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
+# For @property methods, use @memoized_property below.
+memoize = functools.lru_cache
+
+
+def size_str(size_in_bytes):
+ """Returns a human readable size string.
+
+ If size_in_bytes is None, then returns "Unknown size".
+
+ For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`.
+
+ Args:
+ size_in_bytes: `int` or `None`, the size, in bytes, that we want to
+ format as a human-readable size string.
+ """
+ if not size_in_bytes:
+ return "Unknown size"
+
+ _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]
+
+ size_in_bytes = float(size_in_bytes)
+ for name, size_bytes in _NAME_LIST:
+ value = size_in_bytes / size_bytes
+ if value >= 1.0:
+ return f"{value:.2f} {name}"
+ return f"{int(size_in_bytes)} bytes"
+
+
+def convert_file_size_to_int(size: Union[int, str]) -> int:
+ """
+ Converts a size expressed as a string with digits and a unit (like `"50MB"`) to an integer (in bytes).
+
+ Args:
+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
+
+ Example:
+
+ ```py
+ >>> convert_file_size_to_int("1MiB")
+ 1048576
+ ```
+ """
+ if isinstance(size, int):
+ return size
+ if size.upper().endswith("PIB"):
+ return int(size[:-3]) * (2**50)
+ if size.upper().endswith("TIB"):
+ return int(size[:-3]) * (2**40)
+ if size.upper().endswith("GIB"):
+ return int(size[:-3]) * (2**30)
+ if size.upper().endswith("MIB"):
+ return int(size[:-3]) * (2**20)
+ if size.upper().endswith("KIB"):
+ return int(size[:-3]) * (2**10)
+ if size.upper().endswith("PB"):
+ int_size = int(size[:-2]) * (10**15)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("TB"):
+ int_size = int(size[:-2]) * (10**12)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("GB"):
+ int_size = int(size[:-2]) * (10**9)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("MB"):
+ int_size = int(size[:-2]) * (10**6)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("KB"):
+ int_size = int(size[:-2]) * (10**3)
+ return int_size // 8 if size.endswith("b") else int_size
+ raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
+
+
+def glob_pattern_to_regex(pattern):
+ # partially taken from fsspec:
+ # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735
+ return (
+ pattern.replace("\\", r"\\")
+ .replace(".", r"\.")
+ .replace("*", ".*")
+ .replace("+", r"\+")
+ .replace("//", "/")
+ .replace("(", r"\(")
+ .replace(")", r"\)")
+ .replace("|", r"\|")
+ .replace("^", r"\^")
+ .replace("$", r"\$")
+ .rstrip("/")
+ .replace("?", ".")
+ )
+
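+# Illustrative sketch: the conversion is a deliberately partial escaping of glob syntax, e.g.
+#
+#   >>> glob_pattern_to_regex("data/*.csv")
+#   'data/.*\\.csv'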
+
+def string_to_dict(string: str, pattern: str) -> Dict[str, str]:
+ """Un-format a string using a python f-string pattern.
+ From https://stackoverflow.com/a/36838374
+
+ Example::
+
+ >>> p = 'hello, my name is {name} and I am a {age} year old {what}'
+ >>> s = p.format(name='cody', age=18, what='quarterback')
+ >>> s
+ 'hello, my name is cody and I am a 18 year old quarterback'
+ >>> string_to_dict(s, p)
+ {'age': '18', 'name': 'cody', 'what': 'quarterback'}
+
+ Args:
+ string (str): input string
+ pattern (str): pattern formatted like a python f-string
+
+ Returns:
+ Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern
+
+ Raises:
+ ValueError: if the string doesn't match the pattern
+ """
+ regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern)
+ result = re.search(regex, string)
+ if result is None:
+ raise ValueError(f"String {string} doesn't match the pattern {pattern}")
+ values = list(result.groups())
+ keys = re.findall(r"{(.+?)}", pattern)
+ _dict = dict(zip(keys, values))
+ return _dict
+
+
+def asdict(obj):
+ """Convert an object to its dictionary representation recursively.
+
+
+ """
+
+ # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict
+
+ def _is_dataclass_instance(obj):
+ # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass
+ return is_dataclass(obj) and not isinstance(obj, type)
+
+ def _asdict_inner(obj):
+ if _is_dataclass_instance(obj):
+ result = {}
+ for f in fields(obj):
+ value = _asdict_inner(getattr(obj, f.name))
+ if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False):
+ result[f.name] = value
+ return result
+ elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
+ # obj is a namedtuple
+ return type(obj)(*[_asdict_inner(v) for v in obj])
+ elif isinstance(obj, (list, tuple)):
+ # Assume we can create an object of this type by passing in a
+ # generator (which is not true for namedtuples, handled
+ # above).
+ return type(obj)(_asdict_inner(v) for v in obj)
+ elif isinstance(obj, dict):
+ return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()}
+ else:
+ return copy.deepcopy(obj)
+
+ if not isinstance(obj, dict) and not _is_dataclass_instance(obj):
+ raise TypeError(f"{obj} is not a dict or a dataclass")
+
+ return _asdict_inner(obj)
+
+
+@contextmanager
+def temporary_assignment(obj, attr, value):
+ """Temporarily assign obj.attr to value."""
+ original = getattr(obj, attr, None)
+ setattr(obj, attr, value)
+ try:
+ yield
+ finally:
+ setattr(obj, attr, original)
+
+
+@contextmanager
+def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
+ """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
+ np_state = np.random.get_state()
+ np.random.seed(seed)
+
+ if set_pytorch and config.TORCH_AVAILABLE:
+ import torch
+
+ torch_state = torch.random.get_rng_state()
+ torch.random.manual_seed(seed)
+
+ if torch.cuda.is_available():
+ torch_cuda_states = torch.cuda.get_rng_state_all()
+ torch.cuda.manual_seed_all(seed)
+
+ if set_tensorflow and config.TF_AVAILABLE:
+ import tensorflow as tf
+ from tensorflow.python.eager import context as tfpycontext
+
+ tf_state = tf.random.get_global_generator()
+ temp_gen = tf.random.Generator.from_seed(seed)
+ tf.random.set_global_generator(temp_gen)
+
+ if not tf.executing_eagerly():
+ raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
+
+ tf_context = tfpycontext.context() # eager mode context
+ tf_seed = tf_context._seed
+ tf_rng_initialized = hasattr(tf_context, "_rng")
+ if tf_rng_initialized:
+ tf_rng = tf_context._rng
+ tf_context._set_global_seed(seed)
+
+ try:
+ yield
+ finally:
+ np.random.set_state(np_state)
+
+ if set_pytorch and config.TORCH_AVAILABLE:
+ torch.random.set_rng_state(torch_state)
+ if torch.cuda.is_available():
+ torch.cuda.set_rng_state_all(torch_cuda_states)
+
+ if set_tensorflow and config.TF_AVAILABLE:
+ tf.random.set_global_generator(tf_state)
+
+ tf_context._seed = tf_seed
+ if tf_rng_initialized:
+ tf_context._rng = tf_rng
+ else:
+ delattr(tf_context, "_rng")
+
+
+def unique_values(values):
+ """Iterate over iterable and return only unique values in order."""
+ seen = set()
+ for value in values:
+ if value not in seen:
+ seen.add(value)
+ yield value
+
+
+def no_op_if_value_is_null(func):
+ """If the value is None, return None, else call `func`."""
+
+ def wrapper(value):
+ return func(value) if value is not None else None
+
+ return wrapper
+
+
+def first_non_null_value(iterable):
+ """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index."""
+ for i, value in enumerate(iterable):
+ if value is not None:
+ return i, value
+ return -1, None
+
+
+def zip_dict(*dicts):
+ """Iterate over items of dictionaries grouped by their keys."""
+ for key in unique_values(itertools.chain(*dicts)): # set merge all keys
+ # Will raise KeyError if the dicts don't have the same keys
+ yield key, tuple(d[key] for d in dicts)
+
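+# Illustrative sketch: `zip_dict` pairs the values that share a key across the input dicts.
+#
+#   >>> list(zip_dict({"a": 1, "b": 2}, {"a": 10, "b": 20}))
+#   [('a', (1, 10)), ('b', (2, 20))]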
+
+class NonMutableDict(dict):
+ """Dict where keys can only be added but not modified.
+
+ Will raise an error if the user tries to overwrite an existing key. The error message
+ can be customized during construction. It will be formatted using {key} for
+ the overwritten key.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._error_msg = kwargs.pop(
+ "error_msg",
+ "Try to overwrite existing key: {key}",
+ )
+ if kwargs:
+ raise ValueError("NonMutableDict cannot be initialized with kwargs.")
+ super().__init__(*args, **kwargs)
+
+ def __setitem__(self, key, value):
+ if key in self:
+ raise ValueError(self._error_msg.format(key=key))
+ return super().__setitem__(key, value)
+
+ def update(self, other):
+ if any(k in self for k in other):
+ raise ValueError(self._error_msg.format(key=set(self) & set(other)))
+ return super().update(other)
+
+
+class classproperty(property): # pylint: disable=invalid-name
+ """Descriptor to be used as decorator for @classmethods."""
+
+ def __get__(self, obj, objtype=None):
+ return self.fget.__get__(None, objtype)()
+
+
+def _single_map_nested(args):
+ """Apply a function recursively to each element of a nested data struct."""
+ function, data_struct, batched, batch_size, types, rank, disable_tqdm, desc = args
+
+ # Singleton first to spare some computation
+ if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
+ if batched:
+ return function([data_struct])[0]
+ else:
+ return function(data_struct)
+ if (
+ batched
+ and not isinstance(data_struct, dict)
+ and isinstance(data_struct, types)
+ and all(not isinstance(v, (dict, types)) for v in data_struct)
+ ):
+ return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]
+
+ # Reduce logging to keep things readable in multiprocessing with tqdm
+ if rank is not None and logging.get_verbosity() < logging.WARNING:
+ logging.set_verbosity_warning()
+ # Print at least one thing to fix tqdm in notebooks in multiprocessing
+ # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308
+ if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):
+ print(" ", end="", flush=True)
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct
+ pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc
+ with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
+ if isinstance(data_struct, dict):
+ return {
+ k: _single_map_nested((function, v, batched, batch_size, types, None, True, None)) for k, v in pbar
+ }
+ else:
+ mapped = [_single_map_nested((function, v, batched, batch_size, types, None, True, None)) for v in pbar]
+ if isinstance(data_struct, list):
+ return mapped
+ elif isinstance(data_struct, tuple):
+ return tuple(mapped)
+ else:
+ return np.array(mapped)
+
+
+def map_nested(
+ function: Callable[[Any], Any],
+ data_struct: Any,
+ dict_only: bool = False,
+ map_list: bool = True,
+ map_tuple: bool = False,
+ map_numpy: bool = False,
+ num_proc: Optional[int] = None,
+ parallel_min_length: int = 2,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ types: Optional[tuple] = None,
+ disable_tqdm: bool = True,
+ desc: Optional[str] = None,
+) -> Any:
+ """Apply a function recursively to each element of a nested data struct.
+
+ Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
+ `parallel_min_length`.
+
+
+
+ Before version 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal to ``len(iterable)``.
+
+ Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to ``len(iterable)`` and
+ multiprocessing is used.
+
+
+
+ Args:
+ function (`Callable`): Function to be applied to `data_struct`.
+ data_struct (`Any`): Data structure to apply `function` to.
+ dict_only (`bool`, default `False`): Whether to only apply `function` recursively to `dict` values in
+ `data_struct`.
+ map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
+ `dict` values).
+ map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
+ `dict` values).
+ map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
+ (besides `dict` values).
+ num_proc (`int`, *optional*): Number of processes.
+ The level in the data struct used for multiprocessing is the first level that has smaller sub-structs,
+ starting from the root.
+ parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
+ processing.
+
+ batched (`bool`, defaults to `False`):
+ Provide batch of items to `function`.
+
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of items per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch to `function`.
+
+ types (`tuple`, *optional*): Additional container types (besides `dict`) whose elements should also have
+ `function` applied to them recursively.
+ disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
+ desc (`str`, *optional*): Prefix for the tqdm progressbar.
+
+ Returns:
+ `Any`
+ """
+ if types is None:
+ types = []
+ if not dict_only:
+ if map_list:
+ types.append(list)
+ if map_tuple:
+ types.append(tuple)
+ if map_numpy:
+ types.append(np.ndarray)
+ types = tuple(types)
+
+ # Singleton
+ if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
+ if batched:
+ data_struct = [data_struct]
+ mapped = function(data_struct)
+ if batched:
+ mapped = mapped[0]
+ return mapped
+
+ iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
+
+ if num_proc is None:
+ num_proc = 1
+ if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
+ mapped = [
+ map_nested(
+ function=function,
+ data_struct=obj,
+ num_proc=num_proc,
+ parallel_min_length=parallel_min_length,
+ batched=batched,
+ batch_size=batch_size,
+ types=types,
+ )
+ for obj in iterable
+ ]
+ elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
+ if batched:
+ if batch_size is None or batch_size <= 0:
+ batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
+ iterable = list(iter_batched(iterable, batch_size))
+ mapped = [
+ _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
+ for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
+ ]
+ if batched:
+ mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
+ else:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ message=".* is experimental and might be subject to breaking changes in the future\\.$",
+ category=UserWarning,
+ )
+ if batched:
+ if batch_size is None or batch_size <= 0:
+ batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0)
+ iterable = list(iter_batched(iterable, batch_size))
+ mapped = parallel_map(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested
+ )
+ if batched:
+ mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
+
+ if isinstance(data_struct, dict):
+ return dict(zip(data_struct.keys(), mapped))
+ else:
+ if isinstance(data_struct, list):
+ return mapped
+ elif isinstance(data_struct, tuple):
+ return tuple(mapped)
+ else:
+ return np.array(mapped)
+
+
+class NestedDataStructure:
+ def __init__(self, data=None):
+ self.data = data if data is not None else []
+
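+ # Example (illustrative): NestedDataStructure({"a": [1, [2, 3]], "b": 4}).flatten() returns [1, 2, 3, 4]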
+ def flatten(self, data=None):
+ data = data if data is not None else self.data
+ if isinstance(data, dict):
+ return self.flatten(list(data.values()))
+ elif isinstance(data, (list, tuple)):
+ return [flattened for item in data for flattened in self.flatten(item)]
+ else:
+ return [data]
+
+
+def has_sufficient_disk_space(needed_bytes, directory="."):
+ try:
+ free_bytes = disk_usage(os.path.abspath(directory)).free
+ except OSError:
+ return True
+ return needed_bytes < free_bytes
+
+
+def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
+ """Convert a link to a file on a github repo in a link to the raw github object."""
+ parsed = urlparse(url_path)
+ sub_directory = None
+ if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
+ if "blob" in url_path:
+ if not url_path.endswith(".py"):
+ raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
+ url_path = url_path.replace("blob", "raw") # Point to the raw file
+ else:
+ # Parse github url to point to zip
+ github_path = parsed.path[1:]
+ repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
+ repo_owner, repo_name = repo_info.split("/")
+ url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
+ sub_directory = f"{repo_name}-{branch}"
+ return url_path, sub_directory
+
+
+def lock_importable_file(importable_local_file: str) -> FileLock:
+ # Check the directory with a unique name in our dataset folder
+ # path is: ./datasets/dataset_name/hash_from_code/script.py
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
+ importable_directory_path = str(Path(importable_local_file).resolve().parent.parent)
+ lock_path = importable_directory_path + ".lock"
+ return FileLock(lock_path)
+
+
+def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
+ """Find whether we should import or clone additional files for a given processing script, and list the imports.
+
+ We allow:
+ - library dependencies,
+ - local dependencies and
+ - external dependencies whose url is specified with a comment starting with "# From:" followed by the raw url to a file, an archive or a github repository.
+ External dependencies will be downloaded (and extracted if needed) in the dataset folder.
+ We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
+
+ Note that only direct imports in the dataset processing script are handled.
+ We don't recursively explore the additional imports to download further files.
+
+ Example::
+
+ import tensorflow
+ import .c4_utils
+ import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
+ """
+ lines = []
+ with open(file_path, encoding="utf-8") as f:
+ lines.extend(f.readlines())
+
+ logger.debug(f"Checking {file_path} for additional imports.")
+ imports: List[Tuple[str, str, str, Optional[str]]] = []
+ is_in_docstring = False
+ for line in lines:
+ docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
+
+ if len(docstr_start_match) == 1:
+ # flip True <=> False only if the docstring
+ # starts on this line without finishing on it
+ is_in_docstring = not is_in_docstring
+
+ if is_in_docstring:
+ # import statements in docstrings should
+ # not be added as required dependencies
+ continue
+
+ match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
+ if match is None:
+ match = re.match(
+ r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
+ line,
+ flags=re.MULTILINE,
+ )
+ if match is None:
+ continue
+ if match.group(1):
+ # The import starts with a '.', we will download the relevant file
+ if any(imp[1] == match.group(2) for imp in imports):
+ # We already have this import
+ continue
+ if match.group(3):
+ # The import has a comment with 'From:', we'll retrieve it from the given url
+ url_path = match.group(3)
+ url_path, sub_directory = _convert_github_url(url_path)
+ imports.append(("external", match.group(2), url_path, sub_directory))
+ elif match.group(2):
+ # The import should be at the same place as the file
+ imports.append(("internal", match.group(2), match.group(2), None))
+ else:
+ if match.group(3):
+ # The import has a comment with `From: git+https:...`; the user is expected to pip install the library from git.
+ url_path = match.group(3)
+ imports.append(("library", match.group(2), url_path, None))
+ else:
+ imports.append(("library", match.group(2), match.group(2), None))
+
+ return imports
+
+
+def copyfunc(func):
+ result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
+ result.__kwdefaults__ = func.__kwdefaults__
+ return result
+
+
+Y = TypeVar("Y")
+
+
+def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int:
+ for i, result in enumerate(func(**kwargs)):
+ queue.put(result)
+ return i
+
+
+def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]:
+ return {f.pid for f in pool._pool}
+
+
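+# Example (illustrative): flatten the outputs of several worker generators, yielding results in completion order.
+#   with multiprocess.Pool(2) as pool:
+#       results = list(iflatmap_unordered(pool, lambda n: range(n), kwargs_iterable=[{"n": 2}, {"n": 3}]))
+#       # results contains 0, 1, 0, 1, 2 in some interleaved order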
+def iflatmap_unordered(
+ pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool],
+ func: Callable[..., Iterable[Y]],
+ *,
+ kwargs_iterable: Iterable[dict],
+) -> Iterable[Y]:
+ initial_pool_pid = _get_pool_pid(pool)
+ pool_changed = False
+ manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager
+ with manager_cls() as manager:
+ queue = manager.Queue()
+ async_results = [
+ pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable
+ ]
+ try:
+ while True:
+ try:
+ yield queue.get(timeout=0.05)
+ except Empty:
+ if all(async_result.ready() for async_result in async_results) and queue.empty():
+ break
+ if _get_pool_pid(pool) != initial_pool_pid:
+ pool_changed = True
+ # One of the subprocesses has died. We should not wait forever.
+ raise RuntimeError(
+ "One of the subprocesses has abruptly died during map operation."
+ "To debug the error, disable multiprocessing."
+ )
+ finally:
+ if not pool_changed:
+ # we get the result in case there's an error to raise
+ [async_result.get(timeout=0.05) for async_result in async_results]
+
+
+T = TypeVar("T")
+
+
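+# Example (illustrative): list(iter_batched(range(5), 2)) returns [[0, 1], [2, 3], [4]]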
+def iter_batched(iterable: Iterable[T], n: int) -> Iterable[List[T]]:
+ if n < 1:
+ raise ValueError(f"Invalid batch size {n}")
+ batch = []
+ for item in iterable:
+ batch.append(item)
+ if len(batch) == n:
+ yield batch
+ batch = []
+ if batch:
+ yield batch
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/readme.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/readme.py
new file mode 100644
index 0000000000000000000000000000000000000000..66ed087f7d67181c6840179fa634e8b8e4238f85
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/readme.py
@@ -0,0 +1,277 @@
+# loading package files: https://stackoverflow.com/a/20885799
+import importlib.resources as pkg_resources
+import logging
+from pathlib import Path
+from typing import Any, List, Tuple
+
+import yaml
+
+from . import resources
+from .deprecation_utils import deprecated
+
+
+BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
+this_url = f"{BASE_REF_URL}/{__file__}"
+logger = logging.getLogger(__name__)
+
+
+def load_yaml_resource(resource: str) -> Tuple[Any, str]:
+ content = pkg_resources.read_text(resources, resource)
+ return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"
+
+
+readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")
+
+FILLER_TEXT = [
+ "[Needs More Information]",
+ "[More Information Needed]",
+ "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
+]
+
+# Dictionary representation of section/readme, error_list, warning_list
+ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]
+
+
+class Section:
+ def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
+ self.name = name
+ self.level = level
+ self.lines = lines
+ self.text = ""
+ self.is_empty_text = True
+ self.content = {}
+ self.parsing_error_list = []
+ self.parsing_warning_list = []
+ if self.lines is not None:
+ self.parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def parse(self, suppress_parsing_errors: bool = False):
+ current_sub_level = ""
+ current_lines = []
+ code_start = False
+ for line in self.lines:
+ if line.strip(" \n") == "":
+ continue
+ elif line.strip(" \n")[:3] == "```":
+ code_start = not code_start
+ elif line.split()[0] == self.level + "#" and not code_start:
+ if current_sub_level != "":
+ self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
+ current_lines = []
+ else:
+ if current_lines != []:
+ self.text += "".join(current_lines).strip()
+ if self.text != "" and self.text not in FILLER_TEXT:
+ self.is_empty_text = False
+ current_lines = []
+
+ current_sub_level = " ".join(line.split()[1:]).strip(" \n")
+ else:
+ current_lines.append(line)
+ else:
+ if current_sub_level != "":
+ if current_sub_level in self.content:
+ self.parsing_error_list.append(
+ f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
+ )
+ self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
+ else:
+ if current_lines != []:
+ self.text += "".join(current_lines).strip()
+ if self.text != "" and self.text not in FILLER_TEXT:
+ self.is_empty_text = False
+
+ if self.level == "" and not suppress_parsing_errors:
+ if self.parsing_error_list != [] or self.parsing_warning_list != []:
+ errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
+ error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
+ raise ValueError(error_string)
+
+ def validate(self, structure: dict) -> ReadmeValidatorOutput:
+ """Validates a Section class object recursively using the structure provided as a dictionary.
+
+ Args:
+ structure (:obj: `dict`): The dictionary representing the expected structure.
+
+ Returns:
+ :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors.
+ """
+ # Header text validation
+ error_list = []
+ warning_list = []
+ if structure["allow_empty"] is False:
+ # If content is expected
+ if self.is_empty_text and self.content == {}:
+ # If no content is found, mention it in the error_list
+ error_list.append(f"Expected some content in section `{self.name}` but it is empty.")
+
+ if structure["allow_empty_text"] is False:
+ # If some text is expected
+ if self.is_empty_text:
+ # If no text is found, mention it in the error_list
+ error_list.append(
+ f"Expected some text in section `{self.name}` but it is empty (text in subsections are ignored)."
+ )
+ # Subsections Validation
+ if structure["subsections"] is not None:
+ # If subsections are expected
+ if self.content == {}:
+ # If no subsections are present
+ values = [subsection["name"] for subsection in structure["subsections"]]
+ # Mention the expected values in the error_list
+ error_list.append(
+ f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'."
+ )
+ else:
+ # If some subsections are present
+ structure_names = [subsection["name"] for subsection in structure["subsections"]]
+ has_missing_subsections = False
+ for idx, name in enumerate(structure_names):
+ if name not in self.content:
+ # If the expected subsection is not present
+ error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.")
+ has_missing_subsections = True
+ else:
+ # If the subsection is present, validate subsection, return the result
+ # and concat the errors from subsection to section error_list
+
+ # Skip sublevel validation if current level is `###`
+ if self.level == "###":
+ continue
+ else:
+ _, subsec_error_list, subsec_warning_list = self.content[name].validate(
+ structure["subsections"][idx]
+ )
+ error_list += subsec_error_list
+ warning_list += subsec_warning_list
+
+ if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here
+ for name in self.content:
+ if name not in structure_names:
+ # If an extra subsection is present
+ warning_list.append(
+ f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
+ )
+ if error_list:
+ # If there are errors, do not return the dictionary as it is invalid
+ return {}, error_list, warning_list
+ else:
+ return self.to_dict(), error_list, warning_list
+
+ def to_dict(self) -> dict:
+ """Returns the dictionary representation of a section."""
+ return {
+ "name": self.name,
+ "text": self.text,
+ "is_empty_text": self.is_empty_text,
+ "subsections": [value.to_dict() for value in self.content.values()],
+ }
+
+
+@deprecated("Use `huggingface_hub.DatasetCard` instead.")
+class ReadMe(Section): # Level 0
+ def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
+ super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse
+ self.structure = structure
+ self.yaml_tags_line_count = -2
+ self.tag_count = 0
+ self.lines = lines
+ if self.lines is not None:
+ self.parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def validate(self):
+ if self.structure is None:
+ content, error_list, warning_list = self._validate(readme_structure)
+ else:
+ content, error_list, warning_list = self._validate(self.structure)
+ if error_list != [] or warning_list != []:
+ errors = "\n".join(["-\t" + x for x in error_list + warning_list])
+ error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
+ raise ValueError(error_string)
+
+ @classmethod
+ def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
+ with open(path, encoding="utf-8") as f:
+ lines = f.readlines()
+ return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
+
+ @classmethod
+ def from_string(
+ cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
+ ):
+ lines = string.split("\n")
+ return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
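+
+ # Example (illustrative):
+ # ReadMe.from_string("---\ntags: []\n---\n# Dataset Card for Demo\nSome text.\n").validate()
+ # raises ValueError when required sections (e.g. `Table of Contents`) are missing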
+
+ def parse(self, suppress_parsing_errors: bool = False):
+ # Skip Tags
+ line_count = 0
+
+ for line in self.lines:
+ self.yaml_tags_line_count += 1
+ if line.strip(" \n") == "---":
+ self.tag_count += 1
+ if self.tag_count == 2:
+ break
+ line_count += 1
+ if self.tag_count == 2:
+ self.lines = self.lines[line_count + 1 :] # Skip everything up to and including the closing YAML marker
+ else:
+ self.lines = self.lines[self.tag_count :]
+ super().parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def __str__(self):
+ """Returns the string of dictionary representation of the ReadMe."""
+ return str(self.to_dict())
+
+ def _validate(self, readme_structure):
+ error_list = []
+ warning_list = []
+ if self.yaml_tags_line_count == 0:
+ warning_list.append("Empty YAML markers are present in the README.")
+ elif self.tag_count == 0:
+ warning_list.append("No YAML markers are present in the README.")
+ elif self.tag_count == 1:
+ warning_list.append("Only the start of YAML tags present in the README.")
+ # Check how many first level sections are present.
+ num_first_level_keys = len(self.content.keys())
+ if num_first_level_keys > 1:
+ # If more than one, add to the error list, continue
+ error_list.append(
+ f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
+ )
+ elif num_first_level_keys < 1:
+ # If less than one, append error.
+ error_list.append(
+ "The README has no first-level headings. One heading is expected. Skipping further validation for this README."
+ )
+
+ else:
+ # If one exactly
+ start_key = list(self.content.keys())[0] # Get the key
+ if start_key.startswith("Dataset Card for"): # Check correct start
+ # If the starting is correct, validate all the sections
+ _, sec_error_list, sec_warning_list = self.content[start_key].validate(
+ readme_structure["subsections"][0]
+ )
+ error_list += sec_error_list
+ warning_list += sec_warning_list
+ else:
+ # If not found, append error
+ error_list.append(
+ "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
+ )
+ if error_list:
+ # If there are errors, do not return the dictionary as it is invalid
+ return {}, error_list, warning_list
+ else:
+ return self.to_dict(), error_list, warning_list
+
+
+if __name__ == "__main__":
+ from argparse import ArgumentParser
+
+ ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.")
+ ap.add_argument("readme_filepath")
+ args = ap.parse_args()
+ readme_filepath = Path(args.readme_filepath)
+ readme = ReadMe.from_readme(readme_filepath)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/creators.json b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/creators.json
new file mode 100644
index 0000000000000000000000000000000000000000..d9e15f0039cc27ed8abd9fdf394423a3fada2c95
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/creators.json
@@ -0,0 +1,17 @@
+{
+ "language": [
+ "found",
+ "crowdsourced",
+ "expert-generated",
+ "machine-generated",
+ "other"
+ ],
+ "annotations": [
+ "found",
+ "crowdsourced",
+ "expert-generated",
+ "machine-generated",
+ "no-annotation",
+ "other"
+ ]
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/multilingualities.json b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/multilingualities.json
new file mode 100644
index 0000000000000000000000000000000000000000..a35c79f03dfcf7c8a116b7fc8ee1b383ab5022fa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/multilingualities.json
@@ -0,0 +1,6 @@
+{
+ "monolingual": "contains a single language",
+ "multilingual": "contains multiple languages",
+ "translation": "contains translated or aligned text",
+ "other": "other type of language distribution"
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b781e11e8258a446874ebf96104f642d0c190cf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml
@@ -0,0 +1,116 @@
+name: "" # Filename comes here
+allow_empty: false
+allow_empty_text: true
+subsections:
+ - name: "Dataset Card for X" # First-level markdown heading
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Table of Contents"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null # meaning it should not be checked.
+ - name: "Dataset Description"
+ allow_empty: false
+ allow_empty_text: false
+ subsections:
+ - name: "Dataset Summary"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null
+ - name: "Supported Tasks and Leaderboards"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: Languages
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Dataset Structure"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Data Instances"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Data Fields"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Data Splits"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Dataset Creation"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Curation Rationale"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Source Data"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Initial Data Collection and Normalization"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Who are the source language producers?"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Annotations"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Annotation process"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Who are the annotators?"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Personal and Sensitive Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Considerations for Using the Data"
+ allow_empty: true
+ allow_empty_text: true
+ subsections:
+ - name: "Social Impact of Dataset"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Discussion of Biases"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Other Known Limitations"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Additional Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections:
+ - name: "Dataset Curators"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Licensing Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Citation Information"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Contributions"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/sharding.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/sharding.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee3133b80ea927a076eebc7eedc2e7b25013ffa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/sharding.py
@@ -0,0 +1,96 @@
+from typing import List
+
+import numpy as np
+
+
+def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
+ """Return the number of possible shards according to the input gen_kwargs"""
+ # Having lists of different sizes makes sharding ambiguous, raise an error in this case
+ # until we decide how to define sharding without ambiguity for users
+ lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
+ if len(set(lists_lengths.values())) > 1:
+ raise RuntimeError(
+ (
+ "Sharding is ambiguous for this dataset: "
+ + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
+ )
+ )
+ max_length = max(lists_lengths.values(), default=0)
+ return max(1, max_length)
+
+
+def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
+ """
+ Get the range of shard indices per job.
+ If num_shards < max_num_jobs, then num_shards jobs are given a range of one shard each.
+
+ Example:
+
+ ```python
+ >>> _distribute_shards(2, max_num_jobs=4)
+ [range(0, 1), range(1, 2)]
+ >>> _distribute_shards(10, max_num_jobs=3)
+ [range(0, 4), range(4, 7), range(7, 10)]
+ ```
+ """
+ shards_indices_per_group = []
+ for group_idx in range(max_num_jobs):
+ num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
+ if num_shards_to_add == 0:
+ break
+ start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
+ shard_indices = range(start, start + num_shards_to_add)
+ shards_indices_per_group.append(shard_indices)
+ return shards_indices_per_group
+
+
+def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
+ """Split the gen_kwargs into `max_num_job` gen_kwargs"""
+ # Having lists of different sizes makes sharding ambiguous, raise an error in this case
+ num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
+ if num_shards == 1:
+ return [dict(gen_kwargs)]
+ else:
+ shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
+ return [
+ {
+ key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
+ if isinstance(value, list)
+ else value
+ for key, value in gen_kwargs.items()
+ }
+ for group_idx in range(len(shard_indices_per_group))
+ ]
+
+
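+# Example (illustrative):
+#   _merge_gen_kwargs([{"files": ["a"]}, {"files": ["b", "c"]}]) returns {"files": ["a", "b", "c"]}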
+def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
+ return {
+ key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
+ if isinstance(gen_kwargs_list[0][key], list)
+ else gen_kwargs_list[0][key]
+ for key in gen_kwargs_list[0]
+ }
+
+
+def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
+ """Return a shuffled copy of the input gen_kwargs"""
+ # We must shuffle all the lists, and lists of the same size must have the same shuffling.
+ # This way entangled lists of (shard, shard_metadata) are still in the right order.
+
+ # First, let's generate the shuffled indices per list size
+ list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
+ indices_per_size = {}
+ for size in list_sizes:
+ indices_per_size[size] = list(range(size))
+ rng.shuffle(indices_per_size[size])
+ # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
+ shuffled_kwargs = dict(gen_kwargs)
+ for key, value in shuffled_kwargs.items():
+ if isinstance(value, list):
+ shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
+ return shuffled_kwargs
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/stratify.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/stratify.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0967aa1abb790f741af5ff920c67e615d1b01da
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/stratify.py
@@ -0,0 +1,107 @@
+import numpy as np
+
+
+def approximate_mode(class_counts, n_draws, rng):
+ """Computes approximate mode of multivariate hypergeometric.
+ This is an approximation to the mode of the multivariate
+ hypergeometric given by class_counts and n_draws.
+ It shouldn't be off by more than one.
+ It is the most likely outcome of drawing n_draws many
+ samples from the population given by class_counts.
+ Args
+ ----------
+ class_counts : ndarray of int
+ Population per class.
+ n_draws : int
+ Number of draws (samples to draw) from the overall population.
+ rng : random state
+ Used to break ties.
+ Returns
+ -------
+ sampled_classes : ndarray of int
+ Number of samples drawn from each class.
+ np.sum(sampled_classes) == n_draws
+
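+ Example (illustrative)
+ -------
+ approximate_mode(np.array([4, 2]), 3, rng) returns array([2, 1]):
+ the three draws are allocated proportionally to the class counts.
+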
+ """
+ # this computes a bad approximation to the mode of the
+ # multivariate hypergeometric given by class_counts and n_draws
+ continuous = n_draws * class_counts / class_counts.sum()
+ # floored means we don't overshoot n_samples, but probably undershoot
+ floored = np.floor(continuous)
+ # we add samples according to how much "left over" probability
+ # they had, until we arrive at n_samples
+ need_to_add = int(n_draws - floored.sum())
+ if need_to_add > 0:
+ remainder = continuous - floored
+ values = np.sort(np.unique(remainder))[::-1]
+ # add according to remainder, but break ties
+ # randomly to avoid biases
+ for value in values:
+ (inds,) = np.where(remainder == value)
+ # if we need_to_add less than what's in inds
+ # we draw randomly from them.
+ # if we need to add more, we add them all and
+ # go to the next value
+ add_now = min(len(inds), need_to_add)
+ inds = rng.choice(inds, size=add_now, replace=False)
+ floored[inds] += 1
+ need_to_add -= add_now
+ if need_to_add == 0:
+ break
+ return floored.astype(np.int64)
+
+
+def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
+ """
+
+ Provides train/test indices to split data into train/test sets.
+ Its implementation is adapted from the StratifiedShuffleSplit
+ implementation of the scikit-learn library.
+
+ Args
+ ----------
+ y : array-like
+ The class labels used to stratify the split.
+
+ n_train : int
+ Represents the absolute number of train samples.
+
+ n_test : int
+ Represents the absolute number of test samples.
+
+ rng : np.random.Generator
+ Controls the randomness of the training and testing indices produced.
+ Pass a seeded generator for reproducible output across multiple function calls.
+
+ n_splits : int, default=10
+ Number of re-shuffling & splitting iterations.
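+
+ Example (illustrative)
+ ----------
+ rng = np.random.default_rng(42)
+ y = np.array([0, 0, 0, 1, 1, 1])
+ train_indices, test_indices = next(stratified_shuffle_split_generate_indices(y, n_train=4, n_test=2, rng=rng))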
+ """
+ classes, y_indices = np.unique(y, return_inverse=True)
+ n_classes = classes.shape[0]
+ class_counts = np.bincount(y_indices)
+ if np.min(class_counts) < 2:
+ raise ValueError("Minimum class count error")
+ if n_train < n_classes:
+ raise ValueError(
+ "The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes)
+ )
+ if n_test < n_classes:
+ raise ValueError(
+ "The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes)
+ )
+ class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
+ for _ in range(n_splits):
+ n_i = approximate_mode(class_counts, n_train, rng)
+ class_counts_remaining = class_counts - n_i
+ t_i = approximate_mode(class_counts_remaining, n_test, rng)
+
+ train = []
+ test = []
+
+ for i in range(n_classes):
+ permutation = rng.permutation(class_counts[i])
+ perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
+ train.extend(perm_indices_class_i[: n_i[i]])
+ test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
+ train = rng.permutation(train)
+ test = rng.permutation(test)
+
+ yield train, test
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/tf_utils.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/tf_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b69f5c85b2c38bb47506a4b2fedb5a69e1d37c00
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/tf_utils.py
@@ -0,0 +1,582 @@
+# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TF-specific utils import."""
+
+import os
+import warnings
+from functools import partial
+from math import ceil
+from uuid import uuid4
+
+import numpy as np
+import pyarrow as pa
+from multiprocess import get_context
+
+
+try:
+ from multiprocess.shared_memory import SharedMemory
+except ImportError:
+ SharedMemory = None # Version checks should prevent this being called on older Python versions
+
+from .. import config
+
+
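+# Example (illustrative, with TensorFlow installed):
+#   minimal_tf_collate_fn([{"x": 1}, {"x": 2}]) returns {"x": array([1, 2])}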
+def minimal_tf_collate_fn(features):
+ if isinstance(features, dict): # case batch_size=None: nothing to collate
+ return features
+ elif config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ first = features[0]
+ batch = {}
+ for k, v in first.items():
+ if isinstance(v, np.ndarray):
+ batch[k] = np.stack([f[k] for f in features])
+ elif isinstance(v, tf.Tensor):
+ batch[k] = tf.stack([f[k] for f in features])
+ else:
+ batch[k] = np.array([f[k] for f in features])
+ return batch
+
+
+def minimal_tf_collate_fn_with_renaming(features):
+ batch = minimal_tf_collate_fn(features)
+ if "label" in batch:
+ batch["labels"] = batch["label"]
+ del batch["label"]
+ return batch
+
+
+def is_numeric_pa_type(pa_type):
+ if pa.types.is_list(pa_type):
+ return is_numeric_pa_type(pa_type.value_type)
+ return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type)
+
+
+def is_numeric_feature(feature):
+ from .. import ClassLabel, Sequence, Value
+ from ..features.features import _ArrayXD
+
+ if isinstance(feature, Sequence):
+ return is_numeric_feature(feature.feature)
+ elif isinstance(feature, list):
+ return is_numeric_feature(feature[0])
+ elif isinstance(feature, _ArrayXD):
+ return is_numeric_pa_type(feature().storage_dtype)
+ elif isinstance(feature, Value):
+ return is_numeric_pa_type(feature())
+ elif isinstance(feature, ClassLabel):
+ return True
+ else:
+ return False
+
+
+def np_get_batch(
+ indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False
+):
+ if not isinstance(indices, np.ndarray):
+ indices = indices.numpy()
+
+ is_batched = True
+ # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices
+ if isinstance(indices, np.integer):
+ batch = dataset[indices.item()]
+ is_batched = False
+ elif np.all(np.diff(indices) == 1):
+ batch = dataset[indices[0] : indices[-1] + 1]
+ elif isinstance(indices, np.ndarray):
+ batch = dataset[indices]
+ else:
+ raise RuntimeError("Unexpected type for indices: {}".format(type(indices)))
+
+ if cols_to_retain is not None:
+ batch = {
+ key: value
+ for key, value in batch.items()
+ if key in cols_to_retain or key in ("label", "label_ids", "labels")
+ }
+
+ if is_batched:
+ actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same
+ # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert
+ batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)]
+ batch = collate_fn(batch, **collate_fn_args)
+
+ if return_dict:
+ out_batch = {}
+ for col, cast_dtype in columns_to_np_types.items():
+ # In case the collate_fn returns something strange
+ array = np.array(batch[col])
+ array = array.astype(cast_dtype)
+ out_batch[col] = array
+ else:
+ out_batch = []
+ for col, cast_dtype in columns_to_np_types.items():
+ # In case the collate_fn returns something strange
+ array = np.array(batch[col])
+ array = array.astype(cast_dtype)
+ out_batch.append(array)
+ return out_batch
+
+
+def dataset_to_tf(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+):
+ """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess
+ equivalent is multiprocess_dataset_to_tf.
+
+ Args:
+ dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
+ cols_to_retain (`List[str]`): Dataset column(s) to load in the
+ tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
+ that do not exist in the original dataset.
+ collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`. Can be empty.
+ columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
+ output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
+ `tf.TensorSpec` objects.
+ shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
+ validation/evaluation.
+ batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
+ the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
+ defaults to the same setting as shuffle.
+
+ Returns:
+ `tf.data.Dataset`
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything
+ # to the NumPy multiprocessing path.
+ if hasattr(tf, "random_index_shuffle"):
+ random_index_shuffle = tf.random_index_shuffle
+ elif hasattr(tf.random.experimental, "index_shuffle"):
+ random_index_shuffle = tf.random.experimental.index_shuffle
+ else:
+ if len(dataset) > 10_000_000:
+ warnings.warn(
+ "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. "
+ "If you are iterating over a dataset with a very large number of samples, consider "
+ "upgrading to TF >= 2.9."
+ )
+ random_index_shuffle = None
+
+ getter_fn = partial(
+ np_get_batch,
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ return_dict=False,
+ )
+
+ # This works because dictionaries always output in the same order
+ tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()]
+
+ @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])
+ def fetch_function(indices):
+ output = tf.py_function(
+ getter_fn,
+ inp=[indices],
+ Tout=tout,
+ )
+ return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())}
+
+ tf_dataset = tf.data.Dataset.range(len(dataset))
+
+ if shuffle and random_index_shuffle is not None:
+ base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64))
+
+ def scan_random_index(state, index):
+ if tf.reduce_all(state == -1):
+ # This generates a new random seed once per epoch only,
+ # to ensure that we iterate over each sample exactly once per epoch
+ state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64)
+ shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1)
+ return state, shuffled_index
+
+ tf_dataset = tf_dataset.scan(base_seed, scan_random_index)
+ elif shuffle:
+ tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality())
+
+ if batch_size is not None:
+ tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
+
+ tf_dataset = tf_dataset.map(fetch_function)
+
+ if batch_size is not None:
+
+ def ensure_shapes(input_dict):
+ return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}
+
+ else:
+ # Ensure shape but remove batch dimension of output_signature[key].shape
+ def ensure_shapes(input_dict):
+ return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()}
+
+ return tf_dataset.map(ensure_shapes)
+
+
+class SharedMemoryContext:
+ # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
+ # The process that creates shared memory is always the one responsible for unlinking it in the end
+ def __init__(self):
+ self.created_shms = []
+ self.opened_shms = []
+
+ def get_shm(self, name, size, create):
+ shm = SharedMemory(size=int(size), name=name, create=create)
+ if create:
+ # We only unlink the ones we created in this context
+ self.created_shms.append(shm)
+ else:
+ # If we didn't create it, we only close it when done, we don't unlink it
+ self.opened_shms.append(shm)
+ return shm
+
+ def get_array(self, name, shape, dtype, create):
+ shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
+ return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ for shm in self.created_shms:
+ shm.close()
+ shm.unlink()
+ for shm in self.opened_shms:
+ shm.close()
+
+
+class NumpyMultiprocessingGenerator:
+ def __init__(
+ self,
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+ num_workers,
+ ):
+ self.dataset = dataset
+ self.cols_to_retain = cols_to_retain
+ self.collate_fn = collate_fn
+ self.collate_fn_args = collate_fn_args
+ self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)]
+ # Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
+ self.columns_to_np_types = {
+ col: dtype if col not in self.string_columns else np.dtype("U1")
+ for col, dtype in columns_to_np_types.items()
+ }
+ self.output_signature = output_signature
+ self.shuffle = shuffle
+ self.batch_size = batch_size
+ self.drop_remainder = drop_remainder
+ self.num_workers = num_workers
+ # Because strings are converted to characters, we need to add one extra dimension to the shape
+ self.columns_to_ranks = {
+ col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
+ for col, spec in output_signature.items()
+ }
+
+ def __iter__(self):
+ # Make sure we only spawn workers if they have work to do
+ num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
+ # Do the shuffling in iter so that it's done at the start of each epoch
+ per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
+ self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
+ )
+ ctx = get_context("spawn")
+ names = []
+ shape_arrays = []
+ workers = []
+ array_ready_events = [ctx.Event() for _ in range(num_workers)]
+ array_loaded_events = [ctx.Event() for _ in range(num_workers)]
+
+ base_args = {
+ "dataset": self.dataset,
+ "cols_to_retain": self.cols_to_retain,
+ "collate_fn": self.collate_fn,
+ "collate_fn_args": self.collate_fn_args,
+ "columns_to_np_types": self.columns_to_np_types,
+ "columns_to_ranks": self.columns_to_ranks,
+ "string_columns": self.string_columns,
+ }
+ with SharedMemoryContext() as shm_ctx:
+ for i in range(num_workers):
+ worker_random_id = str(uuid4())
+ worker_name = f"dw_{i}_{worker_random_id}"[:10]
+ names.append(worker_name)
+
+ worker_shape_arrays = {
+ col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
+ for col, rank in self.columns_to_ranks.items()
+ }
+ shape_arrays.append(worker_shape_arrays)
+
+ worker_indices = per_worker_batches[i]
+ if i == final_batch_worker and final_batch is not None:
+ final_batch_arg = final_batch
+ else:
+ final_batch_arg = None
+ worker_kwargs = {
+ "worker_name": worker_name,
+ "indices": worker_indices,
+ "extra_batch": final_batch_arg,
+ "array_ready_event": array_ready_events[i],
+ "array_loaded_event": array_loaded_events[i],
+ **base_args,
+ }
+ worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
+ worker.start()
+ workers.append(worker)
+
+ end_signal_received = False
+ while not end_signal_received:
+ for i in range(num_workers):
+ if not array_ready_events[i].wait(timeout=60):
+ raise TimeoutError("Data loading worker timed out!")
+ array_ready_events[i].clear()
+ array_shapes = shape_arrays[i]
+ if any(np.any(shape < 0) for shape in array_shapes.values()):
+ # Child processes send negative array shapes to indicate
+ # that no more data is going to be sent
+ end_signal_received = True
+ break
+ # Matt: Because array shapes are variable we recreate the shared memory each iteration.
+ # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
+ # A future optimization, at the cost of some code complexity, could be to reuse shared memory
+ # between iterations, but this would require knowing in advance the maximum size, or having
+ # a system to only create a new memory block when a new maximum size is seen.
+ # Another potential optimization would be to figure out which memory copies are necessary,
+ # or whether we can yield objects straight out of shared memory.
+ with SharedMemoryContext() as batch_shm_ctx:
+ # This memory context only lasts long enough to copy everything out of the batch
+ arrays = {
+ col: batch_shm_ctx.get_array(
+ f"{names[i]}_{col}",
+ shape=shape,
+ dtype=self.columns_to_np_types[col],
+ create=False,
+ )
+ for col, shape in array_shapes.items()
+ }
+ # Copy everything out of shm because the memory
+ # will be unlinked by the child process at some point
+ arrays = {col: np.copy(arr) for col, arr in arrays.items()}
+ # Now we convert any unicode char arrays to strings
+ for string_col in self.string_columns:
+ arrays[string_col] = (
+ arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
+ )
+ yield arrays
+ array_loaded_events[i].set()
+ # Now we just do some cleanup
+ # Shared memory is cleaned up by the context manager, so we just make sure workers finish
+ for worker in workers:
+ worker.join()
+
+ def __call__(self):
+ return self
+
+ @staticmethod
+ def worker_loop(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ columns_to_ranks,
+ string_columns,
+ indices,
+ extra_batch,
+ worker_name,
+ array_ready_event,
+ array_loaded_event,
+ ):
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
+
+ def send_batch_to_parent(indices):
+ batch = np_get_batch(
+ indices=indices,
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ return_dict=True,
+ )
+
+ # Now begins the fun part where we start shovelling shared memory at the parent process
+ out_arrays = {}
+ with SharedMemoryContext() as batch_shm_ctx:
+ # The batch shared memory context exists only as long as it takes for the parent process
+ # to read everything, after which it cleans everything up again
+ for col, cast_dtype in columns_to_np_types.items():
+ # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
+ array = batch[col]
+ if col in string_columns:
+ # We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
+ # which have a fixed width of 4 bytes. The parent process will convert these back to strings.
+ array = array.view("U1").reshape(array.shape + (-1,))
+ shape_arrays[col][:] = array.shape
+ out_arrays[col] = batch_shm_ctx.get_array(
+ f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
+ )
+ out_arrays[col][:] = array
+
+ array_ready_event.set()
+ array_loaded_event.wait()
+ array_loaded_event.clear()
+
+ with SharedMemoryContext() as shm_ctx:
+ shape_arrays = {
+ col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
+ for col, rank in columns_to_ranks.items()
+ }
+
+ for batch in indices:
+ send_batch_to_parent(batch)
+ if extra_batch is not None:
+ send_batch_to_parent(extra_batch)
+ # Now we send a batsignal to the parent process that we're done
+ for col, array in shape_arrays.items():
+ array[:] = -1
+ array_ready_event.set()
+
+ @staticmethod
+ def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
+ indices = np.arange(len(dataset))
+ if shuffle:
+ np.random.shuffle(indices)
+ num_samples = len(indices)
+ # We distribute the batches so that reading from the workers in round-robin order yields the exact
+ # order specified in indices. This is only important when shuffle is False, but we do it regardless.
+ incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
+ indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
+ if drop_remainder or len(last_incomplete_batch) == 0:
+ last_incomplete_batch = None
+
+ indices = indices.reshape(-1, batch_size)
+ num_batches = len(indices)
+ final_batches_cutoff = num_batches - (num_batches % num_workers)
+ indices, final_batches = np.split(indices, [final_batches_cutoff])
+ indices = indices.reshape(-1, num_workers, batch_size)
+
+ per_worker_indices = np.split(indices, indices.shape[1], axis=1)
+ per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
+ # Distribute the final batches to the first workers
+ for i in range(len(final_batches)):
+ # len(final_batches) can be zero, and is always less than num_workers
+ per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
+ # Add the last incomplete batch to the next worker, which might be the first worker
+ if last_incomplete_batch is not None:
+ incomplete_batch_worker_idx = len(final_batches)
+ else:
+ incomplete_batch_worker_idx = None
+ return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
+
+
+def multiprocess_dataset_to_tf(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+ num_workers,
+):
+ """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process
+ equivalent is dataset_to_tf.
+
+ Args:
+ dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
+ cols_to_retain (`List[str]`): Dataset column(s) to load in the
+ tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
+ that do not exist in the original dataset.
+ collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`. Can be empty.
+ columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
+ output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
+ `tf.TensorSpec` objects.
+ shuffle(`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
+ validation/evaluation.
+ batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
+ the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ drop_remainder(`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
+ defaults to the same setting as shuffle.
+ num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
+
+ Returns:
+ `tf.data.Dataset`
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ data_generator = NumpyMultiprocessingGenerator(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+
+ tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
+ if drop_remainder:
+ dataset_length = int(len(dataset) // batch_size)
+ else:
+ dataset_length = int(ceil(len(dataset) / batch_size))
+ return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/tqdm.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e28a8ff7ccfda7d4fd9a6195636d181f285ceb65
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/tqdm.py
@@ -0,0 +1,131 @@
+"""Utility helpers to handle progress bars in `datasets`.
+
+Usage:
+ 1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
+ 2. To disable progress bars, either use `disable_progress_bars()` helper or set the
+ environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
+ 3. To re-enable progress bars, use `enable_progress_bars()`.
+ 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
+
+NOTE: Environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` has priority.
+
+Example:
+ ```py
+ from datasets.utils import (
+ are_progress_bars_disabled,
+ disable_progress_bars,
+ enable_progress_bars,
+ tqdm,
+ )
+
+ # Disable progress bars globally
+ disable_progress_bars()
+
+ # Use as normal `tqdm`
+ for _ in tqdm(range(5)):
+ do_something()
+
+ # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+ for _ in tqdm(range(5), disable=False):
+ do_something()
+
+ are_progress_bars_disabled() # True
+
+ # Re-enable progress bars globally
+ enable_progress_bars()
+
+ # Progress bar will be shown !
+ for _ in tqdm(range(5)):
+ do_something()
+ ```
+"""
+
+import warnings
+
+from tqdm.auto import tqdm as old_tqdm
+
+from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
+
+
+# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
+# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
+# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user has not set the
+# environment variable and is free to enable/disable progress bars programmatically.
+# TL;DR: env variable has priority over code.
+#
+# By default, progress bars are enabled.
+_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
+
+
+def disable_progress_bars() -> None:
+ """
+ Globally disable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
+ variable has been set.
+
+ Use [`~utils.enable_progress_bars`] to re-enable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
+ warnings.warn(
+ "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = True
+
+
+def enable_progress_bars() -> None:
+ """
+ Globally enable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
+ variable has been set.
+
+ Use [`~utils.disable_progress_bars`] to disable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
+ warnings.warn(
+ "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = False
+
+
+def are_progress_bars_disabled() -> bool:
+ """Return whether progress bars are globally disabled or not.
+
+ Progress bars used in `datasets` can be enabled or disabled globally using [`~utils.enable_progress_bars`]
+ and [`~utils.disable_progress_bars`], or by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment variable.
+ """
+ global _hf_datasets_progress_bars_disabled
+ return _hf_datasets_progress_bars_disabled
+
+
+class tqdm(old_tqdm):
+ """
+ Class to override `disable` argument in case progress bars are globally disabled.
+
+ Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+ """
+
+ def __init__(self, *args, **kwargs):
+ if are_progress_bars_disabled():
+ kwargs["disable"] = True
+ super().__init__(*args, **kwargs)
+
+ def __delattr__(self, attr: str) -> None:
+ """Fix for https://github.com/huggingface/datasets/issues/6066"""
+ try:
+ super().__delattr__(attr)
+ except AttributeError:
+ if attr != "_lock":
+ raise
+
+
+# backward compatibility
+enable_progress_bar = enable_progress_bars
+disable_progress_bar = disable_progress_bars
+
+
+def is_progress_bar_enabled():
+ return not are_progress_bars_disabled()
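A minimal sketch of the env-variable priority described in the docstring, assuming the vendored `datasets` package is importable; the value is read when `datasets.config` is imported, so it has to be set beforehand:

```py
import os

# Set the variable before importing `datasets`, since `datasets.config` reads it at import time.
os.environ["HF_DATASETS_DISABLE_PROGRESS_BARS"] = "1"

from datasets.utils import are_progress_bars_disabled, enable_progress_bars

enable_progress_bars()               # warns: the environment variable takes priority
print(are_progress_bars_disabled())  # True
```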
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/track.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/track.py
new file mode 100644
index 0000000000000000000000000000000000000000..11a3787c7d8595cc7160994973f28db1f709b3b2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/track.py
@@ -0,0 +1,49 @@
+from collections.abc import Iterator
+from typing import Iterable
+
+
+class tracked_str(str):
+ origins = {}
+
+ def set_origin(self, origin: str):
+ if super().__repr__() not in self.origins:
+ self.origins[super().__repr__()] = origin
+
+ def get_origin(self):
+ return self.origins.get(super().__repr__(), str(self))
+
+ def __repr__(self) -> str:
+ if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
+ return super().__repr__()
+ else:
+ return f"{str(self)} (origin={self.origins[super().__repr__()]})"
+
+
+class tracked_list(list):
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self.last_item = None
+
+ def __iter__(self) -> Iterator:
+ for x in super().__iter__():
+ self.last_item = x
+ yield x
+ self.last_item = None
+
+ def __repr__(self) -> str:
+ if self.last_item is None:
+ return super().__repr__()
+ else:
+ return f"{self.__class__.__name__}(current={self.last_item})"
+
+
+class TrackedIterable(Iterable):
+ def __init__(self) -> None:
+ super().__init__()
+ self.last_item = None
+
+ def __repr__(self) -> str:
+ if self.last_item is None:
+ return super().__repr__()
+ else:
+ return f"{self.__class__.__name__}(current={self.last_item})"
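A hypothetical usage sketch of the tracking helpers above (the path and origin URL are made up):

```py
from datasets.utils.track import tracked_list, tracked_str

# `tracked_str` remembers where a value came from and shows it in its repr.
path = tracked_str("data/train.csv")                # hypothetical path
path.set_origin("https://example.com/train.csv")    # hypothetical origin
print(repr(path))  # data/train.csv (origin=https://example.com/train.csv)

# `tracked_list` exposes the item currently being yielded while it is iterated.
items = tracked_list(["a", "b", "c"])
for item in items:
    print(repr(items))  # tracked_list(current=a), tracked_list(current=b), ...
print(repr(items))      # plain list repr again once iteration has finished
```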
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/typing.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad087fc98d2a6de2d3e493120135fc9ea49e605
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/typing.py
@@ -0,0 +1,9 @@
+import os
+from typing import Dict, List, Tuple, TypeVar, Union
+
+
+T = TypeVar("T")
+
+ListLike = Union[List[T], Tuple[T, ...]]
+NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
+PathLike = Union[str, bytes, os.PathLike]
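A hypothetical function signature built from these aliases (the function name is illustrative, not part of the package):

```py
from datasets.utils.typing import NestedDataStructureLike, PathLike


def describe_data_files(data_files: NestedDataStructureLike[PathLike]) -> None:
    """Accepts a single path, a list/tuple of paths, or a dict of split name -> path."""
    print(type(data_files).__name__, data_files)


describe_data_files("train.csv")
describe_data_files(["train.csv", "test.csv"])
describe_data_files({"train": "train.csv", "test": "test.csv"})
```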
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/version.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..75cf4c39d5f9b916ade8f8d88a0f0ca9e5769217
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/version.py
@@ -0,0 +1,106 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Version utils."""
+
+import dataclasses
+import re
+from dataclasses import dataclass
+from functools import total_ordering
+from typing import Optional, Union
+
+
+_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
+
+
+@total_ordering
+@dataclass
+class Version:
+ """Dataset version `MAJOR.MINOR.PATCH`.
+
+ Args:
+ version_str (`str`):
+ The dataset version.
+ description (`str`):
+ A description of what is new in this version.
+ major (`str` or `int`, *optional*): the major version, set automatically from `version_str`.
+ minor (`str` or `int`, *optional*): the minor version, set automatically from `version_str`.
+ patch (`str` or `int`, *optional*): the patch version, set automatically from `version_str`.
+
+ Example:
+
+ ```py
+ >>> VERSION = datasets.Version("1.0.0")
+ ```
+ """
+
+ version_str: str
+ description: Optional[str] = None
+ major: Optional[Union[str, int]] = None
+ minor: Optional[Union[str, int]] = None
+ patch: Optional[Union[str, int]] = None
+
+ def __post_init__(self):
+ self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
+
+ def __repr__(self):
+ return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
+
+ @property
+ def tuple(self):
+ return self.major, self.minor, self.patch
+
+ def _validate_operand(self, other):
+ if isinstance(other, str):
+ return Version(other)
+ elif isinstance(other, Version):
+ return other
+ raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")
+
+ def __eq__(self, other):
+ try:
+ other = self._validate_operand(other)
+ except (TypeError, ValueError):
+ return False
+ else:
+ return self.tuple == other.tuple
+
+ def __lt__(self, other):
+ other = self._validate_operand(other)
+ return self.tuple < other.tuple
+
+ def __hash__(self):
+ return hash(_version_tuple_to_str(self.tuple))
+
+ @classmethod
+ def from_dict(cls, dic):
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in dic.items() if k in field_names})
+
+ def _to_yaml_string(self) -> str:
+ return self.version_str
+
+
+def _str_to_version_tuple(version_str):
+ """Return the tuple (major, minor, patch) version extracted from the str."""
+ res = _VERSION_REG.match(version_str)
+ if not res:
+ raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
+ return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
+
+
+def _version_tuple_to_str(version_tuple):
+ """Return the str version from the version tuple (major, minor, patch)."""
+ return ".".join(str(v) for v in version_tuple)
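A minimal sketch of how `Version` behaves, based on the comparison logic above; strings are coerced through `_validate_operand`, so `Version` and `str` can be mixed:

```py
from datasets.utils.version import Version

v = Version("1.2.0")
print(v.tuple)        # (1, 2, 0)
print(v == "1.2.0")   # True  -- the string is coerced to a Version before comparing
print(v < "1.10.0")   # True  -- tuples compare numerically, not lexicographically
# Version("1.2")      # would raise ValueError: the format must be x.y.z with digits
```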
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9fd53a985e58845db8476af7881c1a4bcf89b2fa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1031461363c3772a9a32030693341b112093ff989eab6e3d04213d6873c235d9
+size 1027824
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..8c696f55cf7305aed53fd1d267ea282fc8347cf0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2a17bea265bea483aa6953d1bf75f4f283d5ca3c93547efcdc84242cad54baa
+size 1163696
diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz
new file mode 100644
index 0000000000000000000000000000000000000000..4f22bd3a3c941a683747944a0f12c7914f4b3f07
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/scipy/spatial/tests/data/degenerate_pointset.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048abc1ddd924bf2d4d1f216015552ed9431f9e99546fbf382768eda58788175
+size 22548