diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d66164e9e1d586bc03bd6eba52300a5e54ee53ff
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f90617ba006aed49375457946eab401c0c7ee4953e2214115c931ca580cfca89
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..e4e50a4b3b8f3de84fa272b91eab02f50259cffb
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77fc38238a48dd4480293df4bf9feae153c0490b3bcd8cf9932f43d8c676516a
+size 9372
diff --git a/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c3035613af90ea3dcc3eae5f349a2b5d5d2e251e
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adc3909ccbac7d2fad5193d34924ce0b6a18673a28ffbd4d854aee83506cf9e2
+size 9293
diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..19144c69b4e760fc05b2ede90da4e735053925bd
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f247f9cad8f6a73ab8bda6461a4a4c3f94166313820616b88aa28684e183f38
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b400092baf33108cdf59ecc023bfa25191913d63
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d00e1626357023b7d7af5969a0ca7c09611c5aa31e7abd528e8486ae0e0db25
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..91832be92ca12b66a502dee833bba925dbd25006
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b5a6c2d362fd03c23d099b511d7d4c92c5f62c53c6c6877e40582573ae408b7
+size 50332828
diff --git a/venv/lib/python3.10/site-packages/datasets/__init__.py b/venv/lib/python3.10/site-packages/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e075270f86045dfc03412184208bb5cebb8dd0c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/__init__.py
@@ -0,0 +1,70 @@
+# ruff: noqa
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "2.19.0"
+
+from .arrow_dataset import Dataset
+from .arrow_reader import ReadInstruction
+from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
+from .combine import concatenate_datasets, interleave_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download import *
+from .features import *
+from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
+from .info import DatasetInfo, MetricInfo
+from .inspect import (
+ get_dataset_config_info,
+ get_dataset_config_names,
+ get_dataset_default_config_name,
+ get_dataset_infos,
+ get_dataset_split_names,
+ inspect_dataset,
+ inspect_metric,
+ list_datasets,
+ list_metrics,
+)
+from .iterable_dataset import IterableDataset
+from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
+from .metric import Metric
+from .splits import (
+ NamedSplit,
+ NamedSplitAll,
+ Split,
+ SplitBase,
+ SplitDict,
+ SplitGenerator,
+ SplitInfo,
+ SubSplitInfo,
+ percent,
+)
+from .tasks import *
+from .utils import *
+from .utils import logging
+
+
+# deprecated modules
+from datasets import arrow_dataset as _arrow_dataset # isort:skip
+from datasets import utils as _utils # isort:skip
+from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
+
+_arrow_dataset.concatenate_datasets = concatenate_datasets
+_utils.DownloadConfig = DownloadConfig
+_utils.DownloadManager = DownloadManager
+_utils.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadConfig = DownloadConfig
+_deprecated_download_manager.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadManager = DownloadManager
+
+del _arrow_dataset, _utils, _deprecated_download_manager
diff --git a/venv/lib/python3.10/site-packages/datasets/arrow_dataset.py b/venv/lib/python3.10/site-packages/datasets/arrow_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..686f2e37121f487e2ebb91477f47eff73d5a3b99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/arrow_dataset.py
@@ -0,0 +1,6495 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Simple Dataset wrapping an Arrow Table."""
+
+import contextlib
+import copy
+import fnmatch
+import itertools
+import json
+import math
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+import warnings
+import weakref
+from collections import Counter
+from collections.abc import Mapping
+from copy import deepcopy
+from functools import partial, wraps
+from io import BytesIO
+from math import ceil, floor
+from pathlib import Path
+from random import sample
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ overload,
+)
+from typing import Sequence as Sequence_
+
+import fsspec
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+from fsspec.core import url_to_fs
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+)
+from huggingface_hub.hf_api import RepoFile
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter, OptimizedTypedSequence
+from .data_files import sanitize_patterns
+from .download.streaming_download_manager import xgetsize
+from .features import Audio, ClassLabel, Features, Image, Sequence, Value
+from .features.features import (
+ FeatureType,
+ _align_features,
+ _check_if_features_can_be_aligned,
+ generate_from_arrow_type,
+ pandas_types_mapper,
+ require_decoding,
+)
+from .filesystems import is_remote_filesystem
+from .fingerprint import (
+ fingerprint_transform,
+ format_kwargs_for_fingerprint,
+ format_transform_for_fingerprint,
+ generate_fingerprint,
+ generate_random_fingerprint,
+ get_temporary_cache_files_directory,
+ is_caching_enabled,
+ maybe_register_dataset_for_temp_dir_deletion,
+ update_fingerprint,
+ validate_fingerprint,
+)
+from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table
+from .formatting.formatting import LazyDict, _is_range_contiguous
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .search import IndexableMixin
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import (
+ InMemoryTable,
+ MemoryMappedTable,
+ Table,
+ _memory_mapped_record_batch_reader_from_file,
+ cast_array_to_feature,
+ concat_tables,
+ embed_table_storage,
+ list_table_cache_files,
+ table_cast,
+ table_iter,
+ table_visitor,
+)
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import estimate_dataset_size
+from .utils.info_utils import is_small_dataset
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import (
+ Literal,
+ asdict,
+ convert_file_size_to_int,
+ glob_pattern_to_regex,
+ iflatmap_unordered,
+ string_to_dict,
+ unique_values,
+)
+from .utils.stratify import stratified_shuffle_split_generate_indices
+from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf
+from .utils.typing import ListLike, PathLike
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import polars as pl
+ import pyspark
+ import sqlalchemy
+
+ from .dataset_dict import DatasetDict
+ from .iterable_dataset import IterableDataset
+
+logger = logging.get_logger(__name__)
+
+PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = (
+ "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet"
+)
+
+
+class DatasetInfoMixin:
+ """This base class exposes some attributes of DatasetInfo
+ at the base level of the Dataset for easy access.
+ """
+
+ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
+ self._info = info
+ self._split = split
+
+ @property
+ def info(self):
+ """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
+ return self._info
+
+ @property
+ def split(self):
+ """[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
+ return self._split
+
+ @property
+ def builder_name(self) -> str:
+ return self._info.builder_name
+
+ @property
+ def citation(self) -> str:
+ return self._info.citation
+
+ @property
+ def config_name(self) -> str:
+ return self._info.config_name
+
+ @property
+ def dataset_size(self) -> Optional[int]:
+ return self._info.dataset_size
+
+ @property
+ def description(self) -> str:
+ return self._info.description
+
+ @property
+ def download_checksums(self) -> Optional[dict]:
+ return self._info.download_checksums
+
+ @property
+ def download_size(self) -> Optional[int]:
+ return self._info.download_size
+
+ @property
+ def features(self) -> Optional[Features]:
+ return self._info.features.copy() if self._info.features is not None else None
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._info.homepage
+
+ @property
+ def license(self) -> Optional[str]:
+ return self._info.license
+
+ @property
+ def size_in_bytes(self) -> Optional[int]:
+ return self._info.size_in_bytes
+
+ @property
+ def supervised_keys(self):
+ return self._info.supervised_keys
+
+ @property
+ def task_templates(self):
+ return self._info.task_templates
+
+ @property
+ def version(self):
+ return self._info.version
+
+
+class TensorflowDatasetMixin:
+ _TF_DATASET_REFS = set()
+
+ @staticmethod
+ def _get_output_signature(
+ dataset: "Dataset",
+ collate_fn: Callable,
+ collate_fn_args: dict,
+ cols_to_retain: Optional[List[str]] = None,
+ batch_size: Optional[int] = None,
+ num_test_batches: int = 20,
+ ):
+ """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
+ after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
+ the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
+ it out just by inspecting the dataset.
+
+ Args:
+ dataset (`Dataset`): Dataset to load samples from.
+            collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+                lists of samples into a batch.
+            collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+                `collate_fn`.
+            cols_to_retain (`List[str]`, *optional*): Dataset column(s) to keep when building the test batches.
+                Label columns (`"label"`, `"labels"` and `"label_ids"`) are always retained; `None` keeps all columns.
+ batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
+ Can be None, which indicates that batch sizes can be variable.
+ num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
+
+ Returns:
+ `dict`: Dict mapping column names to tf.Tensorspec objects
+ `dict`: Dict mapping column names to np.dtype objects
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if len(dataset) == 0:
+ raise ValueError("Unable to get the output signature because the dataset is empty.")
+ if batch_size is not None:
+ batch_size = min(len(dataset), batch_size)
+ test_batch_size = 1
+
+ if cols_to_retain is not None:
+ cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
+
+ test_batches = []
+ for _ in range(num_test_batches):
+ indices = sample(range(len(dataset)), test_batch_size)
+ test_batch = dataset[indices]
+ if cols_to_retain is not None:
+ test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
+ test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
+ test_batch = collate_fn(test_batch, **collate_fn_args)
+ test_batches.append(test_batch)
+
+ tf_columns_to_signatures = {}
+ np_columns_to_dtypes = {}
+ for column in test_batches[0].keys():
+ raw_arrays = [batch[column] for batch in test_batches]
+ # In case the collate_fn returns something strange
+ np_arrays = []
+ for array in raw_arrays:
+ if isinstance(array, np.ndarray):
+ np_arrays.append(array)
+ elif isinstance(array, tf.Tensor):
+ np_arrays.append(array.numpy())
+ else:
+ np_arrays.append(np.array(array))
+
+ if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
+ tf_dtype = tf.int64
+ np_dtype = np.int64
+ elif np.issubdtype(np_arrays[0].dtype, np.number):
+ tf_dtype = tf.float32
+ np_dtype = np.float32
+ elif np_arrays[0].dtype.kind == "U": # Unicode strings
+ np_dtype = np.unicode_
+ tf_dtype = tf.string
+ else:
+ raise RuntimeError(
+ f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
+ "Nested types and image/audio types are not supported yet."
+ )
+ shapes = [array.shape for array in np_arrays]
+ static_shape = []
+ for dim in range(len(shapes[0])):
+ sizes = {shape[dim] for shape in shapes}
+ if dim == 0:
+ static_shape.append(batch_size)
+ continue
+ if len(sizes) == 1: # This dimension looks constant
+ static_shape.append(sizes.pop())
+ else: # Use None for variable dimensions
+ static_shape.append(None)
+ tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
+ np_columns_to_dtypes[column] = np_dtype
+
+ return tf_columns_to_signatures, np_columns_to_dtypes
+
+ def to_tf_dataset(
+ self,
+ batch_size: Optional[int] = None,
+ columns: Optional[Union[str, List[str]]] = None,
+ shuffle: bool = False,
+ collate_fn: Optional[Callable] = None,
+ drop_remainder: bool = False,
+ collate_fn_args: Optional[Dict[str, Any]] = None,
+ label_cols: Optional[Union[str, List[str]]] = None,
+ prefetch: bool = True,
+ num_workers: int = 0,
+ num_test_batches: int = 20,
+ ):
+ """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
+ the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
+ `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
+ `tf.Tensor` is yielded instead.
+
+ Args:
+ batch_size (`int`, *optional*):
+ Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
+ batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ columns (`List[str]` or `str`, *optional*):
+ Dataset column(s) to load in the `tf.data.Dataset`.
+ Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
+ shuffle(`bool`, defaults to `False`):
+ Shuffle the dataset order when loading. Recommended `True` for training, `False` for
+ validation/evaluation.
+ drop_remainder(`bool`, defaults to `False`):
+ Drop the last incomplete batch when loading. Ensures
+ that all batches yielded by the dataset will have the same length on the batch dimension.
+ collate_fn(`Callable`, *optional*):
+ A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`, *optional*):
+ An optional `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ label_cols (`List[str]` or `str`, defaults to `None`):
+ Dataset column(s) to load as labels.
+ Note that many models compute loss internally rather than letting Keras do it, in which case
+ passing the labels here is optional, as long as they're in the input `columns`.
+ prefetch (`bool`, defaults to `True`):
+ Whether to run the dataloader in a separate thread and maintain
+ a small buffer of batches for training. Improves performance by allowing data to be loaded in the
+ background while the model is training.
+ num_workers (`int`, defaults to `0`):
+ Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8.
+ num_test_batches (`int`, defaults to `20`):
+ Number of batches to use to infer the output signature of the dataset.
+ The higher this number, the more accurate the signature will be, but the longer it will take to
+ create the dataset.
+
+ Returns:
+ `tf.data.Dataset`
+
+ Example:
+
+ ```py
+ >>> ds_train = ds["train"].to_tf_dataset(
+ ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ ... shuffle=True,
+ ... batch_size=16,
+ ... collate_fn=data_collator,
+ ... )
+ ```
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if (isinstance(columns, list) and len(columns) == 1) or (
+ isinstance(label_cols, list) and len(label_cols) == 1
+ ):
+ warnings.warn(
+ "The output of `to_tf_dataset` will change when a passing single element list for `labels` or "
+ "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
+ "single string.\n"
+ "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
+ "New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
+ FutureWarning,
+ )
+
+ if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
+ logger.warning(
+ "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
+ "pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
+ "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
+ "Tensors instead of streaming with to_tf_dataset()."
+ )
+
+ if collate_fn is None:
+ # Set a very simple default collator that just stacks things together
+ collate_fn = minimal_tf_collate_fn
+ if collate_fn_args is None:
+ collate_fn_args = {}
+ if label_cols and not columns:
+ raise ValueError("Cannot specify label_cols without specifying columns!")
+ if label_cols is None:
+ label_cols = []
+ elif isinstance(label_cols, str):
+ label_cols = [label_cols]
+ if len(set(label_cols)) < len(label_cols):
+ raise ValueError("List of label_cols contains duplicates.")
+ if columns:
+ if isinstance(columns, str):
+ columns = [columns]
+ if len(set(columns)) < len(columns):
+ raise ValueError("List of columns contains duplicates.")
+ cols_to_retain = list(set(columns + label_cols))
+ else:
+ cols_to_retain = None # Indicates keeping all valid columns
+ columns = []
+
+ if self.format["type"] not in ["custom", "numpy"]:
+ dataset = self.with_format("numpy")
+ else:
+ dataset = self
+
+ # TODO(Matt, QL): deprecate the retention of label_ids and label
+
+ output_signature, columns_to_np_types = dataset._get_output_signature(
+ dataset,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ cols_to_retain=cols_to_retain,
+ batch_size=batch_size if drop_remainder else None,
+ num_test_batches=num_test_batches,
+ )
+
+ if "labels" in output_signature:
+ if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
+ columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
+ if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
+ label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
+
+ for col in columns:
+ if col not in output_signature:
+ raise ValueError(f"Column {col} not found in dataset!")
+
+ for col in label_cols:
+ if col not in output_signature:
+ raise ValueError(f"Label column {col} not found in dataset!")
+
+ if num_workers == 0:
+ tf_dataset = dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ )
+ elif num_workers > 0:
+ if batch_size is None:
+ raise NotImplementedError(
+ "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
+ "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
+ )
+ tf_dataset = multiprocess_dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+ else:
+ raise ValueError("num_workers must be >= 0")
+
+ def split_features_and_labels(input_batch):
+ # TODO(Matt, QL): deprecate returning the dict content when there's only one key
+ features = {key: tensor for key, tensor in input_batch.items() if key in columns}
+ labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
+ if len(features) == 1:
+ features = list(features.values())[0]
+ if len(labels) == 1:
+ labels = list(labels.values())[0]
+ if isinstance(labels, dict) and len(labels) == 0:
+ return features
+ else:
+ return features, labels
+
+ if cols_to_retain is not None:
+ tf_dataset = tf_dataset.map(split_features_and_labels)
+
+ if prefetch:
+ tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
+
+ # Remove a reference to the open Arrow file on delete
+ def cleanup_callback(ref):
+ dataset.__del__()
+ self._TF_DATASET_REFS.remove(ref)
+
+ self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
+
+ return tf_dataset
+
+
+class DatasetTransformationNotAllowedError(Exception):
+ pass
+
+
+def transmit_format(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None
+ unformatted_columns = set(self.column_names) - set(self._format_columns or [])
+ self_format = {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ # re-apply format to the output
+ for dataset in datasets:
+ new_format = self_format.copy()
+ if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
+ # sort the columns to have a deterministic list of columns that we can compare with `out_format`
+ new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
+ out_format = {
+ "type": dataset._format_type,
+ "format_kwargs": dataset._format_kwargs,
+ "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
+ "output_all_columns": dataset._output_all_columns,
+ }
+ if out_format != new_format:
+ fingerprint = dataset._fingerprint
+ dataset.set_format(**new_format)
+ dataset._fingerprint = fingerprint
+ return out
+
+ wrapper._decorator_name_ = "transmit_format"
+ return wrapper
+
+
+def transmit_tasks(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ for dataset in datasets:
+ # Remove task templates if a column mapping of the template is no longer valid
+ if self.info.task_templates is not None:
+ dataset.info.task_templates = [
+ template
+ for template in self.info.task_templates
+ if all(
+ dataset._info.features.get(k) == self._info.features.get(k)
+ for k in template.column_mapping.keys()
+ )
+ ]
+ return out
+
+ wrapper._decorator_name_ = "transmit_tasks"
+ return wrapper
+
+
+def update_metadata_with_features(table: Table, features: Features):
+ """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema."""
+ features = Features({col_name: features[col_name] for col_name in table.column_names})
+ if table.schema.metadata is None or b"huggingface" not in table.schema.metadata:
+ pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))
+ else:
+ metadata = json.loads(table.schema.metadata[b"huggingface"].decode())
+ if "info" not in metadata:
+ metadata["info"] = asdict(DatasetInfo(features=features))
+ else:
+ metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"]
+ pa_metadata = {"huggingface": json.dumps(metadata)}
+ table = table.replace_schema_metadata(pa_metadata)
+ return table
+
+
+def _check_table(table) -> Table:
+ """We check the table type to make sure it's an instance of :class:`datasets.table.Table`"""
+ if isinstance(table, pa.Table):
+ # for a pyarrow table, we can just consider it as a in-memory table
+ # this is here for backward compatibility
+ return InMemoryTable(table)
+ elif isinstance(table, Table):
+ return table
+ else:
+ raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.")
+
+
+def _check_valid_indices_value(index, size):
+ if (index < 0 and index + size < 0) or (index >= size):
+ raise IndexError(f"Index {index} out of range for dataset of size {size}.")
+
+
+class NonExistentDatasetError(Exception):
+ """Used when we expect the existence of a dataset"""
+
+ pass
+
+
+class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
+ """A Dataset backed by an Arrow table."""
+
+ def __init__(
+ self,
+ arrow_table: Table,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_table: Optional[Table] = None,
+ fingerprint: Optional[str] = None,
+ ):
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+ IndexableMixin.__init__(self)
+
+ self._data: Table = _check_table(arrow_table)
+ self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
+ maybe_register_dataset_for_temp_dir_deletion(self)
+
+ self._format_type: Optional[str] = None
+ self._format_kwargs: dict = {}
+ self._format_columns: Optional[list] = None
+ self._output_all_columns: bool = False
+ self._fingerprint: str = fingerprint
+
+ # Read metadata
+
+ if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
+ metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
+ if (
+ "fingerprint" in metadata and self._fingerprint is None
+ ): # try to load fingerprint from the arrow file metadata
+ self._fingerprint = metadata["fingerprint"]
+
+ # Infer features if None
+ inferred_features = Features.from_arrow_schema(arrow_table.schema)
+ if self.info.features is None:
+ self.info.features = inferred_features
+ else: # make sure the nested columns are in the right order
+ try:
+ self.info.features = self.info.features.reorder_fields_as(inferred_features)
+ except ValueError as e:
+ raise ValueError(
+ f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
+ )
+
+ # Infer fingerprint if None
+
+ if self._fingerprint is None:
+ self._fingerprint = generate_fingerprint(self)
+
+ # Sanity checks
+
+ if self._info.features is None:
+ raise ValueError("Features can't be None in a Dataset object")
+ if self._fingerprint is None:
+ raise ValueError("Fingerprint can't be None in a Dataset object")
+ if self.info.features.type != inferred_features.type:
+ raise ValueError(
+ f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
+ )
+
+ if self._indices is not None:
+ if not pa.types.is_unsigned_integer(self._indices.column(0).type):
+ raise ValueError(
+ f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
+ )
+ _check_column_names(self._data.column_names)
+
+ self._data = update_metadata_with_features(self._data, self._info.features)
+
+ @property
+ def features(self) -> Features:
+ features = super().features
+ if features is None: # this is already checked in __init__
+ raise ValueError("Features can't be None in a Dataset object")
+ return features
+
+ @classmethod
+ def from_file(
+ cls,
+ filename: str,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_filename: Optional[str] = None,
+ in_memory: bool = False,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_filename (`str`, *optional*):
+ File names of the indices.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ [`Dataset`]
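+
+        Example:
+
+        ```py
+        >>> ds = Dataset.from_file('path/to/dataset.arrow')
+        ```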
+ """
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+
+ if indices_filename is not None:
+ indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
+ else:
+ indices_pa_table = None
+
+ return cls(
+ arrow_table=table,
+ info=info,
+ split=split,
+ indices_table=indices_pa_table,
+ )
+
+ @classmethod
+ def from_buffer(
+ cls,
+ buffer: pa.Buffer,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow buffer.
+
+ Args:
+ buffer (`pyarrow.Buffer`):
+ Arrow buffer.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_buffer (`pyarrow.Buffer`, *optional*):
+ Indices Arrow buffer.
+
+ Returns:
+ [`Dataset`]
+ """
+ table = InMemoryTable.from_buffer(buffer)
+
+ if indices_buffer is not None:
+            indices_table = InMemoryTable.from_buffer(indices_buffer)
+ else:
+ indices_table = None
+
+ return cls(table, info=info, split=split, indices_table=indices_table)
+
+ @classmethod
+ def from_pandas(
+ cls,
+ df: pd.DataFrame,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ preserve_index: Optional[bool] = None,
+ ) -> "Dataset":
+ """
+ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
+
+ The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
+ DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
+ type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
+ contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
+ features and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ Dataframe that contains the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting Dataset.
+ The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
+ Use `preserve_index=True` to force it to be stored as a column.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_pandas(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable.from_pandas(
+ df=df,
+ preserve_index=preserve_index,
+ )
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_polars(
+ cls,
+ df: "pl.DataFrame",
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Collect the underlying arrow arrays in an Arrow Table.
+
+ This operation is mostly zero copy.
+
+ Data types that do copy:
+ * CategoricalType
+
+ Args:
+ df (`polars.DataFrame`): DataFrame to convert to Arrow Table
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Examples:
+ ```py
+ >>> ds = Dataset.from_polars(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable(df.to_arrow())
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_dict(
+ cls,
+ mapping: dict,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
+
+ Args:
+ mapping (`Mapping`):
+ Mapping of strings to Arrays or Python lists.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
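+
+        Example:
+
+        ```py
+        >>> ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
+        ```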
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ arrow_typed_mapping = {}
+ for col, data in mapping.items():
+ if isinstance(data, (pa.Array, pa.ChunkedArray)):
+ data = cast_array_to_feature(data, features[col]) if features is not None else data
+ else:
+ data = OptimizedTypedSequence(
+ features.encode_column(data, col) if features is not None else data,
+ type=features[col] if features is not None else None,
+ col=col,
+ )
+ arrow_typed_mapping[col] = data
+ mapping = arrow_typed_mapping
+ pa_table = InMemoryTable.from_pydict(mapping=mapping)
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ if info.features is None:
+ info.features = Features(
+ {
+ col: generate_from_arrow_type(data.type)
+ if isinstance(data, (pa.Array, pa.ChunkedArray))
+ else data.get_inferred_type()
+ for col, data in mapping.items()
+ }
+ )
+ return cls(pa_table, info=info, split=split)
+
+ @classmethod
+ def from_list(
+ cls,
+ mapping: List[dict],
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+        Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
+
+ Note that the keys of the first entry will be used to determine the dataset columns,
+ regardless of what is passed to features.
+
+ Args:
+ mapping (`List[dict]`): A list of mappings of strings to row values.
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
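+
+        Example:
+
+        ```py
+        >>> ds = Dataset.from_list([{"text": "foo", "label": 0}, {"text": "bar", "label": 1}])
+        ```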
+ """
+ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
+ mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
+ return cls.from_dict(mapping, features, info, split)
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from CSV file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the CSV file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_csv('path/to/dataset.csv')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create a Dataset from a generator.
+
+ Args:
+            generator (`Callable`):
+ A generator function that `yields` examples.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
+
+
+ **kwargs (additional keyword arguments):
+                Keyword arguments to be passed to [`GeneratorConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = Dataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
+ ```
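+
+        ```py
+        >>> # Shards can also be split across worker processes with `num_proc`:
+        >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards}, num_proc=4)
+        ```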
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ gen_kwargs=gen_kwargs,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from JSON or JSON Lines file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the JSON or JSON Lines file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ field (`str`, *optional*):
+                Field name of the JSON file that contains the dataset.
+            num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_json('path/to/dataset.json')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ field=field,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from Parquet file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the Parquet file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_parquet('path/to/dataset.parquet')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from text file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the text file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_text('path/to/dataset.txt')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ keep_in_memory: bool = False,
+ cache_dir: str = None,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ **kwargs,
+ ):
+ """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
+ workers and the driver.
+ keep_in_memory (`bool`):
+ Whether to copy the data in-memory.
+            working_dir (`str`, *optional*):
+ Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
+ a non-NFS intermediate directory may improve performance.
+ load_from_cache_file (`bool`):
+ Whether to load the dataset from the cache if possible.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+        >>> schema=["id", "name"],
+ >>> )
+ >>> ds = Dataset.from_spark(df)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=False,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ working_dir=working_dir,
+ load_from_cache_file=load_from_cache_file,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_sql(
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ """Create Dataset from SQL query or database table.
+
+ Args:
+ sql (`str` or `sqlalchemy.sql.Selectable`):
+ SQL query to be executed or a table name.
+            con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`SqlConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> # Fetch a database table
+ >>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
+ >>> # Execute a SQL query on the table
+ >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
+ >>> # Use a Selectable object to specify the query
+ >>> from sqlalchemy import select, text
+ >>> stmt = select([text("sentence")]).select_from(text("test_data"))
+ >>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
+ ```
+
+
+
+ The returned dataset can only be cached if `con` is specified as URI string.
+
+
+ """
+ from .io.sql import SqlDatasetReader
+
+ return SqlDatasetReader(
+ sql,
+ con,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ **kwargs,
+ ).read()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ maybe_register_dataset_for_temp_dir_deletion(self)
+ return self
+
+ def __del__(self):
+ if hasattr(self, "_data"):
+ del self._data
+ if hasattr(self, "_indices"):
+ del self._indices
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ self.__del__()
+
+ def save_to_disk(
+ self,
+ dataset_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
+ of the dataset directory where the dataset will be saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+                The maximum size of the dataset shards to be saved to disk. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+
+
+ num_proc (`int`, *optional*):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Example:
+
+ ```py
+ >>> ds.save_to_disk("path/to/dataset/directory")
+ >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
+ ```
+ """
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ if self.list_indexes():
+ raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
+
+ if num_shards is None:
+ dataset_nbytes = self._estimate_nbytes()
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, num_proc or 1)
+
+ num_proc = num_proc if num_proc is not None else 1
+ num_shards = num_shards if num_shards is not None else num_proc
+
+ fs: fsspec.AbstractFileSystem
+ fs, _ = url_to_fs(dataset_path, **(storage_options or {}))
+
+ if not is_remote_filesystem(fs):
+ parent_cache_files_paths = {
+ Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
+ }
+            # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
+ if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
+ raise PermissionError(
+ f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
+ )
+
+ fs.makedirs(dataset_path, exist_ok=True)
+
+ # Get json serializable state
+ state = {
+ key: self.__dict__[key]
+ for key in [
+ "_fingerprint",
+ "_format_columns",
+ "_format_kwargs",
+ "_format_type",
+ "_output_all_columns",
+ ]
+ }
+ state["_split"] = str(self.split) if self.split is not None else self.split
+ state["_data_files"] = [
+ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
+ ]
+ for k in state["_format_kwargs"].keys():
+ try:
+ json.dumps(state["_format_kwargs"][k])
+ except TypeError as e:
+ raise TypeError(
+ str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
+ ) from None
+ # Get json serializable dataset info
+ dataset_info = asdict(self._info)
+
+ shards_done = 0
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=len(self),
+ desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
+ )
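+ # One writing job per shard: each job receives a contiguous slice of the dataset and the path of the arrow file to write.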
+ kwargs_per_job = (
+ {
+ "job_id": shard_idx,
+ "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
+ "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
+ "storage_options": storage_options,
+ }
+ for shard_idx in range(num_shards)
+ )
+ shard_lengths = [None] * num_shards
+ shard_sizes = [None] * num_shards
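+ # Each job yields (job_id, done, content): while done is False, content is a progress update (number of examples written); once done is True, content is the final (num_examples, num_bytes) of the shard.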
+ if num_proc > 1:
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ else:
+ with pbar:
+ for kwargs in kwargs_per_job:
+ for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8"
+ ) as state_file:
+ json.dump(state, state_file, indent=2, sort_keys=True)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
+ ) as dataset_info_file:
+ # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
+ sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
+ json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
+
+ @staticmethod
+ def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
+ batch_size = config.DEFAULT_MAX_BATCH_SIZE
+
+ num_examples_progress_update = 0
+ writer = ArrowWriter(
+ features=shard.features,
+ path=fpath,
+ storage_options=storage_options,
+ embed_local_files=True,
+ )
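+ # Write the shard batch by batch, periodically yielding progress updates so the caller can refresh the progress bar; the final yield reports the number of examples and bytes written.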
+ try:
+ _time = time.time()
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ writer.write_table(pa_table)
+ num_examples_progress_update += len(pa_table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+
+ yield job_id, True, (num_examples, num_bytes)
+
+ @staticmethod
+ def _build_local_temp_path(uri_or_path: str) -> Path:
+ """
+ Builds and returns a Path that joins a local temporary directory with the given directory path (or the
+ absolute/relative path extracted from the URI).
+
+ Args:
+ uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) to concatenate.
+
+ Returns:
+ :class:`Path`: the concatenated path (temp dir + path)
+ """
+ src_dataset_path = Path(uri_or_path)
+ tmp_dir = get_temporary_cache_files_directory()
+ return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
+
+ @staticmethod
+ def load_from_disk(
+ dataset_path: str,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "Dataset":
+ """
+ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
+ filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset directory where the dataset will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+ <Deprecated version="2.8.0">
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+ </Deprecated>
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory, the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk("path/to/dataset/directory")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {}))
+
+ dest_dataset_path = dataset_path
+ dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
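+ # A `Dataset` directory contains state and info JSON files, while a `DatasetDict` directory contains a dataset dict JSON file; check which one was provided so the errors below can be helpful.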
+ dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
+ dataset_info_is_file = fs.isfile(dataset_info_path)
+ dataset_state_is_file = fs.isfile(dataset_state_json_path)
+ if not dataset_info_is_file and not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}' or '{dataset_state_json_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}' or '{dataset_state_json_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+ if not dataset_info_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+ if not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+
+ # If the filesystem is remote, download the files to a local temp directory and point dataset_path at the local copies.
+ if is_remote_filesystem(fs):
+ src_dataset_path = dest_dataset_path
+ dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
+ fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ with open(dataset_state_json_path, encoding="utf-8") as state_file:
+ state = json.load(state_file)
+ with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
+ dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
+
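+ # Memory-map the arrow files by default; only load them fully in memory if the dataset is small enough (see `is_small_dataset`).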
+ dataset_size = estimate_dataset_size(
+ Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
+ )
+ keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
+ table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
+
+ arrow_table = concat_tables(
+ thread_map(
+ table_cls.from_file,
+ [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]],
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset from disk",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(state["_data_files"]) <= 16 or None,
+ )
+ )
+
+ split = state["_split"]
+ split = Split(split) if split is not None else split
+
+ dataset = Dataset(
+ arrow_table=arrow_table,
+ info=dataset_info,
+ split=split,
+ fingerprint=state["_fingerprint"],
+ )
+
+ format = {
+ "type": state["_format_type"],
+ "format_kwargs": state["_format_kwargs"],
+ "columns": state["_format_columns"],
+ "output_all_columns": state["_output_all_columns"],
+ }
+ dataset = dataset.with_format(**format)
+
+ return dataset
+
+ @property
+ def data(self) -> Table:
+ """The Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.data
+ MemoryMappedTable
+ text: string
+ label: int64
+ ----
+ text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
+ label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
+ ```
+ """
+ return self._data
+
+ @property
+ def cache_files(self) -> List[dict]:
+ """The cache files containing the Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cache_files
+ [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
+ ```
+ """
+ cache_files = list_table_cache_files(self._data)
+ if self._indices is not None:
+ cache_files += list_table_cache_files(self._indices)
+ return [{"filename": cache_filename} for cache_filename in cache_files]
+
+ @property
+ def num_columns(self) -> int:
+ """Number of columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_columns
+ 2
+ ```
+ """
+ return self._data.num_columns
+
+ @property
+ def num_rows(self) -> int:
+ """Number of rows in the dataset (same as [`Dataset.__len__`]).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_rows
+ 1066
+ ```
+ """
+ if self._indices is not None:
+ return self._indices.num_rows
+ return self._data.num_rows
+
+ @property
+ def column_names(self) -> List[str]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return self._data.column_names
+
+ @property
+ def shape(self) -> Tuple[int, int]:
+ """Shape of the dataset (number of columns, number of rows).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.shape
+ (1066, 2)
+ ```
+ """
+ if self._indices is not None:
+ return (self._indices.num_rows, self._data.num_columns)
+ return self._data.shape
+
+ def unique(self, column: str) -> List:
+ """Return a list of the unique elements in a column.
+
+ This is implemented in the low-level backend and is therefore very fast.
+
+ Args:
+ column (`str`):
+ Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+
+ Returns:
+ `list`: List of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.unique('label')
+ [1, 0]
+ ```
+ """
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+
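+ # If an indices mapping selects a subset of the rows (e.g. after `select` or `filter`), flatten it first so that only the selected rows are taken into account.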
+ if self._indices is not None and self._indices.num_rows != self._data.num_rows:
+ dataset = self.flatten_indices()
+ else:
+ dataset = self
+
+ return dataset._data.column(column).unique().to_pylist()
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
+
+ Args:
+ column (`str`):
+ The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq", split="validation")
+ >>> ds.features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column('answer')
+ >>> ds.features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ # Sanity checks
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ src_feat = self._info.features[column]
+ if not isinstance(src_feat, Value):
+ raise ValueError(
+ f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
+ )
+
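+ # ClassLabel names are strings, so cast non-string values (and nulls, if they are included) to strings first.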
+ if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
+
+ def stringify_column(batch):
+ batch[column] = [
+ str(sample) if include_nulls or sample is not None else None for sample in batch[column]
+ ]
+ return batch
+
+ dset = self.map(
+ stringify_column,
+ batched=True,
+ desc="Stringifying the column",
+ )
+ else:
+ dset = self
+
+ # Create the new feature
+ class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
+ dst_feat = ClassLabel(names=class_names)
+
+ def cast_to_class_labels(batch):
+ batch[column] = [
+ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
+ for sample in batch[column]
+ ]
+ return batch
+
+ new_features = dset.features.copy()
+ new_features[column] = dst_feat
+
+ dset = dset.map(
+ cast_to_class_labels,
+ batched=True,
+ features=new_features,
+ desc="Casting to class labels",
+ )
+
+ return dset
+
+ @fingerprint_transform(inplace=False)
+ def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
+ """Flatten the table.
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Args:
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with flattened columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
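+ # Flatten one level of struct nesting per iteration, stopping early once no struct columns remain (up to max_depth levels).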
+ for depth in range(1, max_depth):
+ if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
+ dataset._data = dataset._data.flatten()
+ else:
+ break
+ dataset.info.features = self._info.features.flatten(max_depth=max_depth)
+ dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.')
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def cast(
+ self,
+ features: Features,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ num_proc: Optional[int] = None,
+ ) -> "Dataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset.
+ batch_size (`int`, defaults to `1000`):
+ Number of examples per batch provided to cast.
+ If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running [`~datasets.Dataset.map`].
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, ClassLabel, Value
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ if sorted(features) != sorted(self._data.column_names):
+ raise ValueError(
+ f"The columns in features ({list(features)}) must be identical "
+ f"as the columns in the dataset: {self._data.column_names}"
+ )
+
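+ # Casting is implemented by mapping `table_cast` over the arrow-formatted dataset, then restoring the original format.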
+ schema = features.arrow_schema
+ format = self.format
+ dataset = self.with_format("arrow")
+ # use functools.partial (rather than a lambda) so the cast function can be pickled, e.g. for multiprocessing on Windows
+ dataset = dataset.map(
+ partial(table_cast, schema=schema),
+ batched=True,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ num_proc=num_proc,
+ features=features,
+ desc="Casting the dataset",
+ )
+ dataset = dataset.with_format(**format)
+ return dataset
+
+ @fingerprint_transform(inplace=False)
+ def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`FeatureType`):
+ Target feature.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ if hasattr(feature, "decode_example"):
+ dataset = copy.deepcopy(self)
+ dataset._info.features[column] = feature
+ dataset._fingerprint = new_fingerprint
+ dataset._data = dataset._data.cast(dataset.features.arrow_schema)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ return dataset
+ else:
+ features = self.features
+ features[column] = feature
+ return self.cast(features)
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+
+ You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method
+ doesn't copy the data of the remaining columns and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.remove_columns('label')
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ >>> ds = ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
+ Dataset({
+ features: [],
+ num_rows: 0
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ for column_name in column_names:
+ del dataset._info.features[column_name]
+
+ dataset._data = dataset._data.drop(column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_column(
+ self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
+ ) -> "Dataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.rename_column('label', 'label_new')
+ Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if original_column_name not in dataset._data.column_names:
+ raise ValueError(
+ f"Original column name {original_column_name} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if new_column_name in dataset._data.column_names:
+ raise ValueError(
+ f"New column name {new_column_name} already in the dataset. "
+ f"Please choose a column name which is not already in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if not new_column_name:
+ raise ValueError("New column name is empty.")
+
+ def rename(columns):
+ return [new_column_name if col == original_column_name else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ new_column_name if col == original_column_name else col: feature
+ for col, feature in self._info.features.items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+
+ extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
+ if extra_columns:
+ raise ValueError(
+ f"Original column names {extra_columns} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
+ if number_of_duplicates_in_new_columns != 0:
+ raise ValueError(
+ "New column names must all be different, but this column mapping "
+ f"has {number_of_duplicates_in_new_columns} duplicates"
+ )
+
+ empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
+ if empty_new_columns:
+ raise ValueError(f"New column names {empty_new_columns} are empty.")
+
+ def rename(columns):
+ return [column_mapping[col] if col in column_mapping else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping else col: feature
+ for col, feature in (self._info.features or {}).items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform. If `None`,
+ the new fingerprint is computed using a hash of the previous
+ fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object which only consists of
+ selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select_columns(['text'])
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Current columns in the dataset: "
+ f"{self._data.column_names}."
+ )
+
+ dataset = copy.deepcopy(self)
+ dataset._data = dataset._data.select(column_names)
+ dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def __len__(self):
+ """Number of rows in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> len(ds)
+ 1066
+ ```
+ """
+ return self.num_rows
+
+ def __iter__(self):
+ """Iterate through the examples.
+
+ If a formatting is set with [`Dataset.set_format`] rows will be returned with the
+ selected format.
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
+ for pa_subtable in table_iter(self.data, batch_size=batch_size):
+ for i in range(pa_subtable.num_rows):
+ pa_subtable_ex = pa_subtable.slice(i, 1)
+ formatted_output = format_table(
+ pa_subtable_ex,
+ 0,
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_output
+ else:
+ for i in range(self.num_rows):
+ yield self._getitem(
+ i,
+ )
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the
+ selected format.
+
+ Args:
+ batch_size (`int`):
+ Size of each batch to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be dropped.
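+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> for batch in ds.iter(batch_size=8):
+ ...     pass  # each `batch` is a dict mapping column names to lists of up to 8 values
+ ```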
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
+ formatted_batch = format_table(
+ pa_subtable,
+ range(pa_subtable.num_rows),
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_batch
+ else:
+ num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
+ for i in range(0, num_rows, batch_size):
+ yield self._getitem(
+ slice(i, i + batch_size),
+ )
+
+ def __repr__(self):
+ return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
+
+ @property
+ def format(self):
+ return {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self.column_names if self._format_columns is None else self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+ """
+ old_format_type = self._format_type
+ old_format_kwargs = self._format_kwargs
+ old_format_columns = self._format_columns
+ old_output_all_columns = self._output_all_columns
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
+
+ @fingerprint_transform(inplace=True)
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+ gets updated. In that case, if you apply `map` to a dataset to add a new column, this column will be formatted as:
+
+ ```
+ new formatted columns = (all columns - previously unformatted columns)
+ ```
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['text', 'label'])
+ >>> ds.format
+ {'type': 'numpy',
+ 'format_kwargs': {},
+ 'columns': ['text', 'label'],
+ 'output_all_columns': False}
+ ```
+ """
+ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow using self.set_format(**self.format)
+
+ # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
+ type = get_format_type_from_alias(type)
+ get_formatter(type, features=self._info.features, **format_kwargs)
+
+ # Check filter column
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, tuple):
+ columns = list(columns)
+ if columns is not None:
+ missing_columns = set(columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+ if columns is not None:
+ columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
+
+ self._format_type = type
+ self._format_kwargs = format_kwargs
+ self._format_columns = columns
+ self._output_all_columns = output_all_columns
+ logger.debug(
+ "Set __getitem__(key) output type to %s for %s columns "
+ " (when key is int or slice) and %s output other (un-formatted) columns.",
+ "python objects" if type is None else type,
+ "no" if columns is None else str(columns),
+ "do" if output_all_columns else "don't",
+ )
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
+ >>> def encode(batch):
+ ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
+ >>> ds.set_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1]),
+ 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
+ 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
+ 5637, 1998, 11690, 2336, 1012, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0])}
+ ```
+ """
+ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
+ >>> ds = ds.with_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
+ 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
+ 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ @deprecated()
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset":
+ """
+ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates).
+
+ Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting.
+
+ Args:
+ task (`Union[str, TaskTemplate]`):
+ The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include:
+
+ - `"text-classification"`
+ - `"question-answering"`
+
+ If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates).
+ id (`int`, defaults to `0`):
+ The id required to unambiguously identify the task template when multiple task templates of the same type are supported.
+ """
+ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD
+ if isinstance(task, str):
+ tasks = [template.task for template in (self.info.task_templates or [])]
+ compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]
+ if not compatible_templates:
+ raise ValueError(
+ f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}"
+ )
+
+ if not 0 <= id < len(compatible_templates):
+ templates_list_str = "\n".join(
+ f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates)
+ )
+ raise ValueError(
+ f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}"
+ )
+ template = compatible_templates[id]
+ elif isinstance(task, TaskTemplate):
+ template = task
+ else:
+ raise ValueError(
+ f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}."
+ )
+ template = template.align_with_features(self.info.features)
+ column_mapping = template.column_mapping
+ columns_to_drop = [column for column in self.column_names if column not in column_mapping]
+ dataset = self.remove_columns(columns_to_drop)
+ dataset = dataset.rename_columns(column_mapping)
+ # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
+ dataset.info.task_templates = None
+ dataset = dataset.cast(features=template.features)
+ return dataset
+
+ def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
+ """
+ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
+ """
+ if isinstance(key, bool):
+ raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
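+ # Per-call kwargs override the dataset-level formatting that was set with `set_format`.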
+ format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
+ format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
+ output_all_columns = (
+ kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
+ )
+ format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
+ format_kwargs = format_kwargs if format_kwargs is not None else {}
+ formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
+ pa_subtable = query_table(self._data, key, indices=self._indices)
+ formatted_output = format_table(
+ pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
+ )
+ return formatted_output
+
+ @overload
+ def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
+ ...
+
+ @overload
+ def __getitem__(self, key: str) -> List: # noqa: F811
+ ...
+
+ def __getitem__(self, key): # noqa: F811
+ """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
+ return self._getitem(key)
+
+ def __getitems__(self, keys: List) -> List:
+ """Can be used to get a batch using a list of integers indices."""
+ batch = self.__getitem__(keys)
+ n_examples = len(batch[next(iter(batch))])
+ return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
+
+ def cleanup_cache_files(self) -> int:
+ """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is
+ one.
+
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Returns:
+ `int`: Number of removed files.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cleanup_cache_files()
+ 10
+ ```
+ """
+ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
+ if not current_cache_files:
+ return 0
+ cache_directory = os.path.dirname(current_cache_files[0])
+ logger.info(f"Listing files in {cache_directory}")
+ files: List[str] = os.listdir(cache_directory)
+ files_to_remove = []
+ for f_name in files:
+ full_name = os.path.abspath(os.path.join(cache_directory, f_name))
+ if f_name.startswith("cache-") and f_name.endswith(".arrow"):
+ if full_name in current_cache_files:
+ logger.info(f"Keeping currently used cache file at {full_name}")
+ continue
+ files_to_remove.append(full_name)
+ for file_path in files_to_remove:
+ logger.info(f"Removing {file_path}")
+ os.remove(file_path)
+ return len(files_to_remove)
+
+ def _get_cache_file_path(self, fingerprint):
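+ # Place new cache files next to the dataset's existing cache files when caching is enabled; otherwise use a randomly named file in the temporary cache directory.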
+ if is_caching_enabled() and self.cache_files:
+ cache_file_name = "cache-" + fingerprint + ".arrow"
+ cache_directory = os.path.dirname(self.cache_files[0]["filename"])
+ else:
+ cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
+ cache_directory = get_temporary_cache_files_directory()
+ cache_file_path = os.path.join(cache_directory, cache_file_name)
+ return cache_file_path
+
+ @transmit_tasks
+ @transmit_format
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """
+ Apply a function to all the examples in the table (individually or in batches) and update the table.
+ If your function returns a column that already exists, then it overwrites it.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`): Function with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, it defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix
+ will be added at the end of the base name of each. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for
+ `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds[0:3]["text"]
+ ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
+ 'Review: the soundtrack alone is worth the price of admission .',
+ 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
+
+ # process a batch of examples
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
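+
+ A further sketch using `with_indices`, which passes each example's index as a second argument (the `"id"` column added here is purely illustrative):
+
+ ```py
+ >>> ds = ds.map(lambda example, idx: {"id": idx}, with_indices=True)
+ >>> ds[0]["id"]
+ 0
+ ```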
+ """
+ if keep_in_memory and cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
+
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError("num_proc must be an integer > 0.")
+
+ # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
+ if len(self) == 0:
+ if self._indices is not None: # empty indices mapping
+ self = Dataset(
+ self.data.slice(0, 0),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ if remove_columns:
+ return self.remove_columns(remove_columns)
+ else:
+ return self
+
+ if function is None:
+ function = lambda x: x # noqa: E731
+
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ if input_columns is not None:
+ missing_columns = set(input_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+
+ if remove_columns is not None:
+ missing_columns = set(remove_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ if num_proc is not None and num_proc > len(self):
+ num_proc = len(self)
+ logger.warning(
+ f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
+ )
+
+ dataset_kwargs = {
+ "shard": self,
+ "function": function,
+ "with_indices": with_indices,
+ "with_rank": with_rank,
+ "input_columns": input_columns,
+ "batched": batched,
+ "batch_size": batch_size,
+ "drop_last_batch": drop_last_batch,
+ "remove_columns": remove_columns,
+ "keep_in_memory": keep_in_memory,
+ "writer_batch_size": writer_batch_size,
+ "features": features,
+ "disable_nullable": disable_nullable,
+ "fn_kwargs": fn_kwargs,
+ }
+
+ if new_fingerprint is None:
+ # we create a unique hash from the function,
+ # current dataset file and the mapping args
+ transform = format_transform_for_fingerprint(Dataset._map_single)
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
+ kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
+ new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ validate_fingerprint(new_fingerprint)
+ dataset_kwargs["new_fingerprint"] = new_fingerprint
+
+ if self.cache_files:
+ if cache_file_name is None:
+ cache_file_name = self._get_cache_file_path(new_fingerprint)
+ dataset_kwargs["cache_file_name"] = cache_file_name
+
+ def load_processed_shard_from_cache(shard_kwargs):
+ """Load a processed shard from cache if it exists, otherwise throw an error."""
+ shard = shard_kwargs["shard"]
+ # Check if we've already cached this computation (indexed by a hash)
+ if shard_kwargs["cache_file_name"] is not None:
+ if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
+ info = shard.info.copy()
+ info.features = features
+ info.task_templates = None
+ return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
+ raise NonExistentDatasetError
+
+ num_shards = num_proc if num_proc is not None else 1
+ if batched and drop_last_batch:
+ pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
+ else:
+ pbar_total = len(self)
+
+ shards_done = 0
+ if num_proc is None or num_proc == 1:
+ transformed_dataset = None
+ try:
+ transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
+ logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
+ except NonExistentDatasetError:
+ pass
+ if transformed_dataset is None:
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=desc or "Map",
+ ) as pbar:
+ for rank, done, content in Dataset._map_single(**dataset_kwargs):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_dataset = content
+ else:
+ pbar.update(content)
+ assert transformed_dataset is not None, "Failed to retrieve the result from map"
+ # update fingerprint if the dataset changed
+ if transformed_dataset._fingerprint != self._fingerprint:
+ transformed_dataset._fingerprint = new_fingerprint
+ return transformed_dataset
+ else:
+
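+ # Insert the per-process suffix (e.g. "_00001_of_00004") right before the file extension.
+ # rank="*" produces a wildcard pattern, used below only for logging the cache location.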
+ def format_cache_file_name(
+ cache_file_name: Optional[str],
+ rank: Union[int, Literal["*"]], # noqa: F722
+ ) -> Optional[str]:
+ if not cache_file_name:
+ return cache_file_name
+ sep = cache_file_name.rindex(".")
+ base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
+ if isinstance(rank, int):
+ cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
+ logger.info(f"Process #{rank} will write at {cache_file_name}")
+ else:
+ cache_file_name = (
+ base_name
+ + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ + extension
+ )
+ return cache_file_name
+
+ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
+ new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
+ validate_fingerprint(new_fingerprint)
+ return new_fingerprint
+
+ prev_env = deepcopy(os.environ)
+ # check if parallelism is off
+ # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
+ if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
+ "",
+ "off",
+ "false",
+ "f",
+ "no",
+ "n",
+ "0",
+ ):
+ logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ shards = [
+ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
+ for rank in range(num_proc)
+ ]
+ kwargs_per_job = [
+ {
+ **dataset_kwargs,
+ "shard": shards[rank],
+ "cache_file_name": format_cache_file_name(cache_file_name, rank),
+ "rank": rank,
+ "offset": sum(len(s) for s in shards[:rank]),
+ "new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
+ }
+ for rank in range(num_shards)
+ ]
+
+ transformed_shards = [None] * num_shards
+ for rank in range(num_shards):
+ try:
+ transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
+ kwargs_per_job[rank] = None
+ except NonExistentDatasetError:
+ pass
+
+ kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
+
+ # We try to create a pool with as many workers as there are shards not yet cached.
+ if kwargs_per_job:
+ if len(kwargs_per_job) < num_shards:
+ logger.info(
+ f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
+ )
+ with Pool(len(kwargs_per_job)) as pool:
+ os.environ = prev_env
+ logger.info(f"Spawning {num_proc} processes")
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=(desc or "Map") + f" (num_proc={num_proc})",
+ ) as pbar:
+ for rank, done, content in iflatmap_unordered(
+ pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_shards[rank] = content
+ else:
+ pbar.update(content)
+ # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
+ for kwargs in kwargs_per_job:
+ del kwargs["shard"]
+ else:
+ logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
+ assert (
+ None not in transformed_shards
+ ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
+ logger.info(f"Concatenating {num_proc} shards")
+ result = _concatenate_map_style_datasets(transformed_shards)
+ # update fingerprint if the dataset changed
+ if any(
+ transformed_shard._fingerprint != shard._fingerprint
+ for transformed_shard, shard in zip(transformed_shards, shards)
+ ):
+ result._fingerprint = new_fingerprint
+ else:
+ result._fingerprint = self._fingerprint
+ return result
+
+ @staticmethod
+ def _map_single(
+ shard: "Dataset",
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ new_fingerprint: Optional[str] = None,
+ rank: Optional[int] = None,
+ offset: int = 0,
+ ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if function does update examples).
+
+ Args:
+ shard (`datasets.Dataset`): Dataset to map the transform on.
+ function (`Callable`): Function with one of the following signatures:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`): Provide batch of examples to `function`
+ batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
+ fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
+ new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ rank (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing.
+ offset (`int`, defaults to `0`): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
+ """
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ # If we do batch computation but no batch size is provided, default to the full dataset
+ if batched and (batch_size is None or batch_size <= 0):
+ batch_size = shard.num_rows
+
+ # We set this variable to True after processing the first example/batch in
+ # `apply_function_on_filtered_inputs` if the map function returns a dict.
+ # If set to False, no new arrow table will be created
+
+ update_data = None
+
+ format_kwargs = shard._format_kwargs.copy()
+ # Lazy formatting is only available for the default format (None/python)
+ if not input_columns and shard._format_type is None:
+ format_kwargs["lazy"] = True
+ input_formatter = get_formatter(
+ shard._format_type,
+ features=shard.features,
+ **format_kwargs,
+ )
+
+ class NumExamplesMismatchError(Exception):
+ pass
+
+ def validate_function_output(processed_inputs, indices):
+ """Validate output of the map function."""
+ if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)):
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
+ )
+ elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
+ allowed_batch_return_types = (list, np.ndarray, pd.Series)
+ if config.POLARS_AVAILABLE and "polars" in sys.modules:
+ import polars as pl
+
+ allowed_batch_return_types += (pl.Series, pl.DataFrame)
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ allowed_batch_return_types += (tf.Tensor,)
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ allowed_batch_return_types += (torch.Tensor,)
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ allowed_batch_return_types += (jnp.ndarray,)
+ all_dict_values_are_lists = all(
+ isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
+ )
+ if all_dict_values_are_lists is False:
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
+ )
+
+ def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
+ """Utility to apply the function on a selection of columns."""
+ nonlocal update_data
+ inputs = format_table(
+ pa_inputs,
+ 0 if not batched else range(pa_inputs.num_rows),
+ format_columns=input_columns,
+ formatter=input_formatter,
+ )
+ fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
+ if offset == 0:
+ effective_indices = indices
+ else:
+ effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
+ additional_args = ()
+ if with_indices:
+ additional_args += (effective_indices,)
+ if with_rank:
+ additional_args += (rank,)
+ processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
+ if isinstance(processed_inputs, LazyDict):
+ processed_inputs = {
+ k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
+ }
+ returned_lazy_dict = True
+ else:
+ returned_lazy_dict = False
+ if update_data is None:
+ # Check if the function returns updated examples
+ update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
+ validate_function_output(processed_inputs, indices)
+ if not update_data:
+ return None # Nothing to update, let's move on
+ if shard._format_type or input_columns:
+ # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
+ inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
+ elif isinstance(inputs, LazyDict):
+ inputs_to_merge = {
+ k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
+ }
+ else:
+ inputs_to_merge = inputs
+ if remove_columns is not None:
+ for column in remove_columns:
+ # `function` can modify input in-place causing column to be already removed.
+ if column in inputs_to_merge:
+ inputs_to_merge.pop(column)
+ if returned_lazy_dict and column in processed_inputs:
+ processed_inputs.pop(column)
+ if check_same_num_examples:
+ input_num_examples = len(pa_inputs)
+ processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
+ if input_num_examples != processed_inputs_num_examples:
+ raise NumExamplesMismatchError()
+ if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
+ # The .map() transform *updates* the dataset:
+ # the output dictionary contains both the input data and the output data.
+ # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
+ return {**inputs_to_merge, **processed_inputs}
+ else:
+ return processed_inputs
+
+ def init_buffer_and_writer():
+ # Prepare output buffer and batched writer in memory or on file if we update the table
+ writer_features = features
+ if writer_features is None:
+ writer_features = shard.features
+ update_features = True
+ else:
+ update_features = False
+ if keep_in_memory or cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ features=writer_features,
+ stream=buf_writer,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching processed dataset at {cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
+ writer = ArrowWriter(
+ features=writer_features,
+ path=tmp_file.name,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ return buf_writer, writer, tmp_file
+
+ num_examples_progress_update = 0
+ # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
+ buf_writer, writer, tmp_file = None, None, None
+
+ # Check if Polars is available and import it if so
+ if config.POLARS_AVAILABLE and "polars" in sys.modules:
+ import polars as pl
+
+ # Optionally initialize the writer as a context manager
+ with contextlib.ExitStack() as stack:
+ try:
+ arrow_formatted_shard = shard.with_format("arrow")
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ if not batched:
+ shard_iterable = enumerate(arrow_formatted_shard)
+ else:
+ num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
+ shard_iterable = zip(
+ range(0, num_rows, batch_size),
+ arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
+ )
+ if not batched:
+ _time = time.time()
+ for i, example in shard_iterable:
+ example = apply_function_on_filtered_inputs(example, i, offset=offset)
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(example, pa.Table):
+ writer.write_row(example)
+ elif isinstance(example, pd.DataFrame):
+ writer.write_row(pa.Table.from_pandas(example))
+ elif (
+ config.POLARS_AVAILABLE
+ and "polars" in sys.modules
+ and isinstance(example, pl.DataFrame)
+ ):
+ writer.write_row(example.to_arrow())
+ else:
+ writer.write(example)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ else:
+ _time = time.time()
+ for i, batch in shard_iterable:
+ num_examples_in_batch = len(batch)
+ indices = list(
+ range(*(slice(i, i + batch_size).indices(shard.num_rows)))
+ ) # Something simpler?
+ try:
+ batch = apply_function_on_filtered_inputs(
+ batch,
+ indices,
+ check_same_num_examples=len(shard.list_indexes()) > 0,
+ offset=offset,
+ )
+ except NumExamplesMismatchError:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index()` to remove your index and then re-add it."
+ ) from None
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(batch, pa.Table):
+ writer.write_table(batch)
+ elif isinstance(batch, pd.DataFrame):
+ writer.write_table(pa.Table.from_pandas(batch))
+ elif (
+ config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame)
+ ):
+ writer.write_table(batch.to_arrow())
+ else:
+ writer.write_batch(batch)
+ num_examples_progress_update += num_examples_in_batch
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ if update_data and writer is not None:
+ writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ yield rank, False, num_examples_progress_update
+ if update_data:
+ if writer is not None:
+ writer.finalize()
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ yield rank, False, num_examples_progress_update
+ if update_data and tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, cache_file_name)
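+ # os.umask returns the previous mask; setting and immediately restoring it is the only way to read it.
+ # The cache file then gets the default rw permissions that the user's umask allows.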
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(cache_file_name, 0o666 & ~umask)
+
+ if update_data:
+ # Create new Dataset from buffer or file
+ info = shard.info.copy()
+ info.features = writer._features
+ info.task_templates = None
+ if buf_writer is None:
+ yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
+ else:
+ yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
+ else:
+ yield rank, True, shard
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
+ )
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if
+ `batched = True`. If `batched = False`, one example per batch is passed to `function`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each cache file.
+ For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
+ the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
+ `_{rank:05d}_of_{num_proc:05d}`).
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
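+
+ A batched sketch of the same filter (the function must return one boolean per example in the batch):
+
+ ```py
+ >>> ds.filter(lambda batch: [label == 1 for label in batch["label"]], batched=True)
+ Dataset({
+     features: ['text', 'label'],
+     num_rows: 533
+ })
+ ```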
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ if function is None:
+ function = lambda x: True # noqa: E731
+
+ if len(self) == 0:
+ return self
+
+ indices = self.map(
+ function=partial(
+ get_indices_from_mask_function,
+ function,
+ batched,
+ with_indices,
+ with_rank,
+ input_columns,
+ self._indices,
+ ),
+ with_indices=True,
+ with_rank=True,
+ features=Features({"indices": Value("uint64")}),
+ batched=True,
+ batch_size=batch_size,
+ remove_columns=self.column_names,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ suffix_template=suffix_template,
+ new_fingerprint=new_fingerprint,
+ input_columns=input_columns,
+ desc=desc or "Filter",
+ )
+ new_dataset = copy.deepcopy(self)
+ new_dataset._indices = indices.data
+ new_dataset._fingerprint = new_fingerprint
+ return new_dataset
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
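+
+ Example:
+
+ A minimal sketch (shuffling is just one way to end up with an indices mapping to flatten):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> shuffled_ds = ds.shuffle(seed=42)  # creates an indices mapping
+ >>> flat_ds = shuffled_ds.flatten_indices()  # rewrites the rows contiguously and drops the mapping
+ ```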
+ """
+
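+ # Mapping with no function is an identity pass: the rows are read through the indices mapping
+ # and written back contiguously, so the resulting dataset no longer carries an indices mapping.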
+ return self.map(
+ batched=True, # for speed
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ new_fingerprint=new_fingerprint,
+ desc="Flattening the indices",
+ num_proc=num_proc,
+ )
+
+ def _new_dataset_with_indices(
+ self,
+ indices_cache_file_name: Optional[str] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
+ current Dataset.
+ """
+
+ if indices_cache_file_name is None and indices_buffer is None:
+ raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
+
+ if fingerprint is None:
+ raise ValueError("please specify a fingerprint for the dataset with indices")
+
+ if indices_cache_file_name is not None:
+ indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
+ else:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+
+ # Return new Dataset object
+ # don't forget to copy the objects
+ return Dataset(
+ self._data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def select(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+
+ Args:
+ indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
+ Range, list or 1D-array of integer indices for indexing.
+ If the indices correspond to a contiguous range, the Arrow table is simply sliced.
+ However passing a list of indices that are not contiguous creates indices mapping, which is much less efficient,
+ but still faster than recreating an Arrow table made of the requested rows.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
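+
+ A non-contiguous selection creates an indices mapping instead of slicing the Arrow table (sketch; the indices here are arbitrary):
+
+ ```py
+ >>> ds.select([0, 2, 4])
+ Dataset({
+     features: ['text', 'label'],
+     num_rows: 3
+ })
+ ```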
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # If indices is a PyArrow array, we convert to NumPy
+ if isinstance(indices, (pa.Array, pa.ChunkedArray)):
+ indices = indices.to_numpy().astype(np.int64)
+
+ # Convert generator objects to lists
+ if isinstance(indices, Iterator):
+ indices = list(indices)
+
+ # If the indices are contiguous, simply slice the arrow table
+ if isinstance(indices, range):
+ if _is_range_contiguous(indices) and indices.start >= 0:
+ start, length = indices.start, indices.stop - indices.start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+ else:
+ try:
+ start = next(iter(indices))
+ except StopIteration:
+ # if `indices` is an empty iterable, we return an empty dataset
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+ if start >= 0:
+ counter_from_start = itertools.count(start=start)
+ if all(i == j for i, j in zip(indices, counter_from_start)):
+ length = next(counter_from_start) - start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+
+ # If not contiguous, we need to create a new indices mapping
+ return self._select_with_indices_mapping(
+ indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def _select_contiguous(
+ self,
+ start: int,
+ length: int,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows from a contiguous slice of data.
+ The slice is defined by its start index and its length.
+
+ Args:
+ start (`int`): start index.
+ length (`int`): length of the slice to select.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_contiguous(0, 4)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ _check_valid_indices_value(start, len(self))
+ _check_valid_indices_value(start + length - 1, len(self))
+ if self._indices is None or length == 0:
+ return Dataset(
+ self.data.slice(start, length),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ else:
+ return Dataset(
+ self.data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=self._indices.slice(start, length),
+ fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def _select_with_indices_mapping(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+ The new dataset is made by creating a new indices mapping on top of the main arrow table.
+
+ Args:
+ indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
+ keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_with_indices_mapping(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Prepare the writer for our indices arrow table
+ if keep_in_memory or indices_cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching indices mapping at {indices_cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False)
+ writer = ArrowWriter(
+ path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+
+ indices = indices if isinstance(indices, list) else list(indices)
+
+ size = len(self)
+ if indices:
+ _check_valid_indices_value(int(max(indices)), size=size)
+ _check_valid_indices_value(int(min(indices)), size=size)
+ else:
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+
+ indices_array = pa.array(indices, type=pa.uint64())
+ # If an indices mapping already exists, compose with it: map the requested positions through the current mapping
+ if self._indices is not None:
+ indices_array = self._indices.column(0).take(indices_array)
+
+ indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
+
+ with writer:
+ try:
+ writer.write_table(indices_table)
+ writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ if tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, indices_cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(indices_cache_file_name, 0o666 & ~umask)
+
+ # Return new Dataset object
+ if buf_writer is None:
+ return self._new_dataset_with_indices(
+ indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
+ )
+ else:
+ return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
+
+ def skip(self, n: int) -> "Dataset":
+ """
+ Create a new [`Dataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ return self.select(range(n, len(self)))
+
+ def take(self, n: int) -> "Dataset":
+ """
+ Create a new [`Dataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ return self.select(range(n))
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
+ def sort(
+ self,
+ column_names: Union[str, Sequence_[str]],
+ reverse: Union[bool, Sequence_[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
+ The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+
+
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ sorted indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='validation')
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Deprecation warning
+ if kind != "deprecated":
+ warnings.warn(
+ "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+
+ # Check proper format of and for duplicates in column_names
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ # Check proper format and length of reverse
+ if not isinstance(reverse, bool):
+ if len(reverse) != len(column_names):
+ raise ValueError(
+ "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
+ )
+ else:
+ reverse = [reverse] * len(column_names)
+
+ # Check whether column name(s) exist in dataset
+ for column in column_names:
+ if not isinstance(column, str) or column not in self._data.column_names:
+ raise ValueError(
+ f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
+ )
+
+ # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
+ if null_placement not in ["at_start", "at_end"]:
+ if null_placement == "first":
+ null_placement = "at_start"
+ elif null_placement == "last":
+ null_placement = "at_end"
+ else:
+ raise ValueError(
+ f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ sort_table = query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ )
+
+ sort_keys = [
+ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
+ ]
+
+ indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
+ )
+ def shuffle(
+ self,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new Dataset where the rows are shuffled.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy `Generator` to use, or a seed to initialize NumPy's default random generator (PCG64).
+
+ Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
+ However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
+ This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
+ To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
+ This may take a lot of time depending on the size of your dataset though:
+
+ ```python
+ my_dataset[0] # fast
+ my_dataset = my_dataset.shuffle(seed=42)
+ my_dataset[0] # up to 10x slower
+ my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
+ my_dataset[0] # fast again
+ ```
+
+ In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
+ It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
+
+ ```python
+ my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
+ for example in my_iterable_dataset:  # fast
+ pass
+
+ shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
+
+ for example in shuffled_iterable_dataset:  # as fast as before
+ pass
+ ```
+
+ Args:
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, default `False`):
+ Keep the shuffled indices in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ shuffled indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds['label'][:10]
+ [1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if seed is not None and generator is not None:
+ raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
+
+ if generator is not None and not isinstance(generator, np.random.Generator):
+ raise ValueError("The provided generator must be an instance of numpy.random.Generator")
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None:
+ if seed is None:
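+ # derive a seed from the global NumPy RNG state (legacy behavior), then advance the global RNG by one step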
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ permutation = generator.permutation(len(self))
+
+ return self.select(
+ indices=permutation,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False,
+ randomized_function=True,
+ fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
+ ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
+ )
+ def train_test_split(
+ self,
+ test_size: Union[float, int, None] = None,
+ train_size: Union[float, int, None] = None,
+ shuffle: bool = True,
+ stratify_by_column: Optional[str] = None,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ train_indices_cache_file_name: Optional[str] = None,
+ test_indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ train_new_fingerprint: Optional[str] = None,
+ test_new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
+ Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
+
+ This method is similar to scikit-learn `train_test_split`.
+
+ Args:
+ test_size (`float` or `int`, *optional*):
+ Size of the test split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
+ If `int`, represents the absolute number of test samples.
+ If `None`, the value is set to the complement of the train size.
+ If `train_size` is also `None`, it will be set to `0.25`.
+ train_size (`float` or `int`, *optional*):
+ Size of the train split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
+ If `int`, represents the absolute number of train samples.
+ If `None`, the value is automatically set to the complement of the test size.
+ shuffle (`bool`, *optional*, defaults to `True`):
+ Whether or not to shuffle the data before splitting.
+ stratify_by_column (`str`, *optional*, defaults to `None`):
+ The column name of labels to be used to perform stratified split of data.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the split indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the splits indices
+ can be identified, use it instead of recomputing.
+ train_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ train split indices instead of the automatically generated cache file name.
+ test_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ test split indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ train_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the train set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ test_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the test set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.train_test_split(test_size=0.2, shuffle=True)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 852
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 214
+ })
+ })
+
+ # set a seed
+ >>> ds = ds.train_test_split(test_size=0.2, seed=42)
+
+ # stratified split
+ >>> ds = load_dataset("imdb", split="train")
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 25000
+ })
+ >>> ds.train_test_split(test_size=0.2, stratify_by_column="label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 20000
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 5000
+ })
+ })
+ ```
+ """
+ from .dataset_dict import DatasetDict # import here because of circular dependency
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return DatasetDict({"train": self, "test": self})
+
+ if test_size is None and train_size is None:
+ test_size = 0.25
+
+ # Safety checks similar to scikit-learn's ones.
+ # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
+ n_samples = len(self)
+ if (
+ isinstance(test_size, int)
+ and (test_size >= n_samples or test_size <= 0)
+ or isinstance(test_size, float)
+ and (test_size <= 0 or test_size >= 1)
+ ):
+ raise ValueError(
+ f"test_size={test_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if (
+ isinstance(train_size, int)
+ and (train_size >= n_samples or train_size <= 0)
+ or isinstance(train_size, float)
+ and (train_size <= 0 or train_size >= 1)
+ ):
+ raise ValueError(
+ f"train_size={train_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if train_size is not None and not isinstance(train_size, (int, float)):
+ raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
+ if test_size is not None and not isinstance(test_size, (int, float)):
+ raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
+
+ if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
+ raise ValueError(
+ f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
+ " range. Reduce test_size and/or train_size."
+ )
+
+ if isinstance(test_size, float):
+ n_test = ceil(test_size * n_samples)
+ elif isinstance(test_size, int):
+ n_test = float(test_size)
+
+ if isinstance(train_size, float):
+ n_train = floor(train_size * n_samples)
+ elif isinstance(train_size, int):
+ n_train = float(train_size)
+
+ if train_size is None:
+ n_train = n_samples - n_test
+ elif test_size is None:
+ n_test = n_samples - n_train
+
+ if n_train + n_test > n_samples:
+ raise ValueError(
+ f"The sum of train_size and test_size = {n_train + n_test}, "
+ "should be smaller than the number of "
+ f"samples {n_samples}. Reduce test_size and/or "
+ "train_size."
+ )
+
+ n_train, n_test = int(n_train), int(n_test)
+
+ if n_train == 0:
+ raise ValueError(
+ f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
+ "resulting train set will be empty. Adjust any of the "
+ "aforementioned parameters."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None and shuffle is True:
+ if seed is None:
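+ # derive a seed from the global NumPy RNG state (legacy behavior), then advance the global RNG by one step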
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+
+ if train_indices_cache_file_name is None:
+ train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
+ if test_indices_cache_file_name is None:
+ test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
+ if (
+ os.path.exists(train_indices_cache_file_name)
+ and os.path.exists(test_indices_cache_file_name)
+ and load_from_cache_file
+ ):
+ logger.info(
+ f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
+ )
+ return DatasetDict(
+ {
+ "train": self._new_dataset_with_indices(
+ fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
+ ),
+ "test": self._new_dataset_with_indices(
+ fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
+ ),
+ }
+ )
+ if not shuffle:
+ if stratify_by_column is not None:
+ raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
+ train_indices = np.arange(n_train)
+ test_indices = np.arange(n_train, n_train + n_test)
+ else:
+ # stratified partition
+ if stratify_by_column is not None:
+ if stratify_by_column not in self._info.features.keys():
+ raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
+ if not isinstance(self._info.features[stratify_by_column], ClassLabel):
+ raise ValueError(
+ f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
+ )
+ try:
+ train_indices, test_indices = next(
+ stratified_shuffle_split_generate_indices(
+ self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
+ )
+ )
+ except Exception as error:
+ if str(error) == "Minimum class count error":
+ raise ValueError(
+ f"The least populated class in {stratify_by_column} column has only 1"
+ " member, which is too few. The minimum"
+ " number of groups for any class cannot"
+ " be less than 2."
+ )
+ else:
+ raise error
+
+ # random partition
+ else:
+ permutation = generator.permutation(len(self))
+ test_indices = permutation[:n_test]
+ train_indices = permutation[n_test : (n_test + n_train)]
+
+ train_split = self.select(
+ indices=train_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=train_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=train_new_fingerprint,
+ )
+ test_split = self.select(
+ indices=test_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=test_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=test_new_fingerprint,
+ )
+
+ return DatasetDict({"train": train_split, "test": test_split})
+
+ def shard(
+ self,
+ num_shards: int,
+ index: int,
+ contiguous: bool = False,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "Dataset":
+ """Return the `index`-th shard of the dataset after splitting it into `num_shards` pieces.
+
+ This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
+ index mod `n = i`.
+
+ `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
+ so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
+ first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
+ `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
+ a dataset with the same order as the original.
+
+ Be sure to shard before using any randomizing operator (such as `shuffle`).
+ It is best if the shard operator is used early in the dataset pipeline.
+
+
+ Args:
+ num_shards (`int`):
+ How many shards to split the dataset into.
+ index (`int`):
+ Which shard to select and return.
+ contiguous (`bool`, defaults to `False`):
+ Whether to select contiguous blocks of indices for shards.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices of each shard instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during processing and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 1066
+ })
+ >>> ds.shard(num_shards=2, index=0)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
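+
+ For illustration, a minimal sketch of contiguous sharding and reassembly (the shard sizes assume the 1066-row dataset above):
+
+ ```py
+ >>> from datasets import concatenate_datasets
+ >>> shards = [ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)]
+ >>> [len(shard) for shard in shards]
+ [267, 267, 266, 266]
+ >>> len(concatenate_datasets(shards)) == len(ds)
+ True
+ ```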
+ """
+ if not 0 <= index < num_shards:
+ raise ValueError("index should be in [0, num_shards-1]")
+ if contiguous:
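+ # contiguous split: the first (len(self) % num_shards) shards each get one extra row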
+ div = len(self) // num_shards
+ mod = len(self) % num_shards
+ start = div * index + min(index, mod)
+ end = start + div + (1 if index < mod else 0)
+ indices = range(start, end)
+ else:
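+ # strided split: take every num_shards-th row starting at position index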
+ indices = np.arange(index, len(self), num_shards)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ )
+
+ @deprecated()
+ def export(
+ self,
+ filename: str,
+ format: str = "tfrecord",
+ ):
+ """Writes the Arrow dataset to a TFRecord file.
+
+ The dataset must already be in tensorflow format. The records will be written with
+ keys from `dataset._format_columns`.
+
+ Args:
+ filename (`str`): The filename, including the `.tfrecord` extension, to write to.
+ format (`str`, *optional*, defaults to `"tfrecord"`): The type of output file. Currently this is a no-op, as
+ TFRecords are the only option. This enables a more flexible function signature later.
+ """
+ try:
+ import tensorflow as tf  # noqa: F401
+ except ImportError:
+ raise ImportError("Tensorflow needs to be installed to be able to export the dataset to TFRecord.")
+
+ # From https://www.tensorflow.org/tutorials/load_data/tfrecord
+ def _bytes_feature(values):
+ """Returns a bytes_list from a list of string / byte."""
+ return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
+
+ def _float_feature(values):
+ """Returns a float_list from a list of float / double."""
+ return tf.train.Feature(float_list=tf.train.FloatList(value=values))
+
+ def _int64_feature(values):
+ """Returns an int64_list from a list of bool / enum / int / uint."""
+ return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
+
+ def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature":
+ """Typechecks `values` and returns the corresponding tf.train.Feature."""
+ if isinstance(values, list):
+ if values and isinstance(values[0], str):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(f"values={values} is empty or contains items that cannot be serialized")
+ elif isinstance(values, np.ndarray):
+ if values.dtype == np.dtype(float):
+ return _float_feature(values)
+ elif values.dtype == np.int64:
+ return _int64_feature(values)
+ elif values.dtype == np.dtype(str) or (
+ values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)
+ ):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(
+ f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized"
+ )
+ elif hasattr(values, "dtype"):
+ if np.issubdtype(values.dtype, np.floating):
+ return _float_feature([values.item()])
+ elif np.issubdtype(values.dtype, np.integer):
+ return _int64_feature([values.item()])
+ elif np.issubdtype(values.dtype, str):
+ return _bytes_feature([values.item().encode()])
+ else:
+ raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized")
+ else:
+ raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized")
+
+ def serialize_example(ex):
+ feature = {key: _feature(value) for key, value in ex.items()}
+ example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
+ return example_proto.SerializeToString()
+
+ def tf_serialize_example(ex):
+ tf_string = tf.py_function(serialize_example, (ex,), tf.string)
+ return tf.reshape(tf_string, ())
+
+ def generator():
+ for ex in self:
+ yield serialize_example(ex)
+
+ if self._format_type != "numpy":
+ raise ValueError("Dataset format must be numpy before exporting")
+ if not filename.endswith(".tfrecord"):
+ raise ValueError(f"filename {filename} must end with .tfrecord")
+ tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
+ writer = tf.data.experimental.TFRecordWriter(filename)
+ logger.info(f"Writing TFRecord to {filename}")
+ writer.write(tf_dataset)
+ logger.info(f"Finished writing TFRecord to {filename}")
+ self = None # delete the dataset reference used by tf_dataset
+
+ def to_csv(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **to_csv_kwargs,
+ ) -> int:
+ """Exports the dataset to csv
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **to_csv_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_csv("path/to/file.csv")
+ ```
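+
+ A sketch with multiprocessing enabled (values are illustrative; `num_proc` and `batch_size` are documented above):
+
+ ```py
+ >>> ds.to_csv("path/to/file.csv", batch_size=10_000, num_proc=4)
+ ```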
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetWriter
+
+ return CsvDatasetWriter(
+ self,
+ path_or_buf,
+ batch_size=batch_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ **to_csv_kwargs,
+ ).write()
+
+ def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
+ """Returns the dataset as a Python dict. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset once).
+
+ `batched` was deprecated in version 2.11.0 and will be removed in version 3.0.0.
+ Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
+
+ batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `dict` or `Iterator[dict]`
+
+ Example:
+
+ ```py
+ >>> ds.to_dict()
+ ```
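+
+ A sketch of the recommended replacement for `batched=True`, iterating the dataset in batches with `.iter()`:
+
+ ```py
+ >>> for batch in ds.iter(batch_size=1000):
+ ...     pass  # each `batch` is a dict mapping column names to lists of up to 1000 values
+ ```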
+ """
+ if batched != "deprecated":
+ warnings.warn(
+ "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
+ FutureWarning,
+ )
+ else:
+ batched = False
+
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pydict()
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pydict()
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_list(self) -> list:
+ """Returns the dataset as a Python list.
+
+ Returns:
+ `list`
+
+ Example:
+
+ ```py
+ >>> ds.to_list()
+ ```
+ """
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pylist()
+
+ def to_json(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **to_json_kwargs,
+ ) -> int:
+ """Export the dataset to JSON Lines or JSON.
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **to_json_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
+
+
+
+ Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`.
+
+ If you would like to write the index, pass `index=True`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_json("path/to/file.jsonl")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetWriter
+
+ return JsonDatasetWriter(
+ self,
+ path_or_buf,
+ batch_size=batch_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ **to_json_kwargs,
+ ).write()
+
+ def to_pandas(
+ self, batch_size: Optional[int] = None, batched: bool = False
+ ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
+ """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `pandas.DataFrame` or `Iterator[pandas.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_pandas()
+ ```
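+
+ A sketch of the batched mode, which yields one `pandas.DataFrame` per batch of rows:
+
+ ```py
+ >>> for df in ds.to_pandas(batched=True, batch_size=1000):
+ ...     pass  # each `df` holds up to 1000 rows
+ ```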
+ """
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_polars(
+ self,
+ batch_size: Optional[int] = None,
+ batched: bool = False,
+ schema_overrides: Optional[dict] = None,
+ rechunk: bool = True,
+ ) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]:
+ """Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ schema_overrides (`dict`, *optional*):
+ Support type specification or override of one or more columns; note that
+ any dtypes inferred from the schema param will be overridden.
+ rechunk (`bool`):
+ Make sure that all data is in contiguous memory. Defaults to `True`.
+ Returns:
+ `polars.DataFrame` or `Iterator[polars.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_polars()
+ ```
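+
+ A sketch of the batched mode (requires `polars` to be installed); each batch is a `polars.DataFrame`:
+
+ ```py
+ >>> for df in ds.to_polars(batched=True, batch_size=1000):
+ ...     pass  # each `df` holds up to 1000 rows
+ ```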
+ """
+ if config.POLARS_AVAILABLE:
+ import polars as pl
+
+ if not batched:
+ return pl.from_arrow(
+ query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices if self._indices is not None else None,
+ ),
+ schema_overrides=schema_overrides,
+ rechunk=rechunk,
+ )
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ pl.from_arrow(
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices if self._indices is not None else None,
+ ),
+ schema_overrides=schema_overrides,
+ rechunk=rechunk,
+ )
+ for offset in range(0, len(self), batch_size)
+ )
+ else:
+ raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
+
+ def to_parquet(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **parquet_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to parquet
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **parquet_writer_kwargs (additional keyword arguments):
+ Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_parquet("path/to/file.parquet")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetWriter
+
+ return ParquetDatasetWriter(
+ self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs
+ ).write()
+
+ def to_sql(
+ self,
+ name: str,
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ batch_size: Optional[int] = None,
+ **sql_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to a SQL database.
+
+ Args:
+ name (`str`):
+ Name of SQL table.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **sql_writer_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of records written.
+
+ Example:
+
+ ```py
+ >>> # con provided as a connection URI string
+ >>> ds.to_sql("data", "sqlite:///my_own_db.sql")
+ >>> # con provided as a sqlite3 connection object
+ >>> import sqlite3
+ >>> con = sqlite3.connect("my_own_db.sql")
+ >>> with con:
+ ... ds.to_sql("data", con)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.sql import SqlDatasetWriter
+
+ return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
+
+ def _estimate_nbytes(self) -> int:
+ dataset_nbytes = self.data.nbytes
+
+ # Find decodable columns, because if there are any, we need to
+ # adjust the dataset size computation (needed for sharding) to account for possible external files
+ decodable_columns = [
+ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
+ ]
+
+ if decodable_columns:
+ # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
+ extra_nbytes = 0
+
+ def extra_nbytes_visitor(array, feature):
+ nonlocal extra_nbytes
+ if isinstance(feature, (Audio, Image)):
+ for x in array.to_pylist():
+ if x is not None and x["bytes"] is None and x["path"] is not None:
+ size = xgetsize(x["path"])
+ extra_nbytes += size
+ extra_nbytes -= array.field("path").nbytes
+
+ table = self.with_format("arrow")[:1000]
+ table_visitor(table, extra_nbytes_visitor)
+
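+ # extrapolate the external-file bytes measured on the first 1000 rows to the full table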
+ extra_nbytes = extra_nbytes * len(self.data) / len(table)
+ dataset_nbytes = dataset_nbytes + extra_nbytes
+
+ if self._indices is not None:
+ dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
+ return dataset_nbytes
+
+ @staticmethod
+ def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
+ for shard_idx, shard in enumerate(shards):
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ yield shard_idx, pa_table
+
+ @staticmethod
+ def _generate_tables_from_cache_file(filename: str):
+ for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
+ yield batch_idx, pa.Table.from_batches([batch])
+
+ def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
+ """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
+ This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
+
+ Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
+ Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
+ All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
+
+ Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
+ This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
+
+ To get the best speed performance, make sure your dataset doesn't have an indices mapping.
+ If this is the case, the data are not read contiguously, which can be slow sometimes.
+ You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
+
+ Args:
+ num_shards (`int`, defaults to `1`):
+ Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
+ and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
+ Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
+
+ Returns:
+ [`datasets.IterableDataset`]
+
+ Example:
+
+ Basic usage:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With lazy filtering and processing:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With sharding to enable efficient shuffling:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.filter(filter_fn).map(process_fn)
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader and shuffling:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling
+ ```python
+ >>> from datasets.distributed import split_dataset_by_node
+ >>> ids = ds.to_iterable_dataset(num_shards=512)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With shuffling and multiple epochs:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> for epoch in range(n_epochs):
+ ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
+ ... for example in ids:
+ ... pass
+ ```
+ Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
+ """
+ from .iterable_dataset import ArrowExamplesIterable, IterableDataset
+
+ if self._format_type is not None:
+ raise NotImplementedError(
+ "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
+ )
+ if num_shards > len(self):
+ raise ValueError(
+ f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
+ )
+ if self._indices is not None:
+ logger.info(
+ "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
+ "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
+ )
+ shards = (
+ [copy.deepcopy(self)]
+ if num_shards == 1
+ else [
+ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
+ ]
+ )
+ ex_iterable = ArrowExamplesIterable(
+ Dataset._generate_tables_from_shards,
+ kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
+ )
+ return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
+
+ def _push_parquet_shards_to_hub(
+ self,
+ repo_id: str,
+ data_dir: str = "data",
+ split: Optional[str] = None,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> Tuple[List[CommitOperationAdd], int, int]:
+ """Pushes the dataset shards as Parquet files to the hub.
+
+ Returns:
+ additions (`List[CommitOperationAdd]`): list of the `CommitOperationAdd` objects for the uploaded shards
+ uploaded_size (`int`): number of bytes uploaded to the repository
+ dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
+ """
+ # Find decodable columns, because if there are any, we need to:
+ # embed the bytes from the files in the shards
+ decodable_columns = (
+ [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
+ if embed_external_files
+ else []
+ )
+
+ dataset_nbytes = self._estimate_nbytes()
+
+ if num_shards is None:
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
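+ # aim for roughly max_shard_size bytes per shard based on the estimated dataset size (at least one shard)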
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, 1)
+
+ shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
+
+ if decodable_columns:
+
+ def shards_with_embedded_external_files(shards):
+ for shard in shards:
+ format = shard.format
+ shard = shard.with_format("arrow")
+ shard = shard.map(
+ embed_table_storage,
+ batched=True,
+ batch_size=1000,
+ keep_in_memory=True,
+ )
+ shard = shard.with_format(**format)
+ yield shard
+
+ shards = shards_with_embedded_external_files(shards)
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ uploaded_size = 0
+ additions = []
+ for index, shard in hf_tqdm(
+ enumerate(shards),
+ desc="Uploading the dataset shards",
+ total=num_shards,
+ ):
+ shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
+ buffer = BytesIO()
+ shard.to_parquet(buffer)
+ uploaded_size += buffer.tell()
+ shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
+ api.preupload_lfs_files(
+ repo_id=repo_id,
+ additions=[shard_addition],
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ additions.append(shard_addition)
+
+ return additions, uploaded_size, dataset_nbytes
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ split: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the dataset to the hub as a Parquet dataset.
+ The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to `False`.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`, defaults to "default"):
+ The configuration name (or subset) of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ split (`str`, *optional*):
+ The name of the split that will be given to that dataset. Defaults to `self.split`.
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*, defaults to `False`):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
+ a unit (like `"5MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset.push_to_hub("<organization>/<dataset_id>")
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
+ >>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
+ >>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024)
+ ```
+
+ If your dataset has multiple splits (e.g. train/validation/test):
+
+ ```python
+ >>> train_dataset.push_to_hub("<organization>/<dataset_id>", split="train")
+ >>> val_dataset.push_to_hub("<organization>/<dataset_id>", split="validation")
+ >>> # later
+ >>> dataset = load_dataset("<organization>/<dataset_id>")
+ >>> train_dataset = dataset["train"]
+ >>> val_dataset = dataset["validation"]
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+ >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+ >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+ ```
+ """
+ if config_name == "data":
+ raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
+
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+
+ if split is None:
+ split = str(self.split) if self.split is not None else "train"
+
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
+ repo_id=repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards,
+ create_pr=create_pr,
+ embed_external_files=embed_external_files,
+ )
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ deletions, deleted_size = [], 0
+ repo_splits = [] # use a list to keep the order of the splits
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in api.list_repo_tree(
+ repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
+ ):
+ if not isinstance(repo_file, RepoFile):
+ continue
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ deleted_size += repo_file.size
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
+ info_to_dump = self.info.copy()
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = uploaded_size
+ info_to_dump.dataset_size = dataset_nbytes
+ info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict(
+ {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
+ )
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if dataset_infos and config_name in dataset_infos:
+ repo_info = dataset_infos[config_name]
+ else:
+ repo_info = None
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
+ repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ repo_info = None
+ # update the total info to dump from existing info
+ if repo_info is not None:
+ logger.info("Updating downloaded metadata with the new split.")
+ if repo_info.splits and list(repo_info.splits) != [split]:
+ if self._info.features != repo_info.features:
+ raise ValueError(
+ f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
+ )
+
+ if split in repo_info.splits:
+ repo_info.download_size -= deleted_size
+ repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
+
+ repo_info.download_checksums = None
+ repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
+ repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
+ repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
+ repo_info.splits.pop(split, None)
+ repo_info.splits[split] = SplitInfo(
+ split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
+ )
+ info_to_dump = repo_info
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ # update the metadata configs
+ if config_name in metadata_configs:
+ metadata_config = metadata_configs[config_name]
+ if "data_files" in metadata_config:
+ data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
+ else:
+ data_files_to_dump = {}
+ # add the new split
+ data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
+ metadata_config_to_dump = {
+ "data_files": [
+ {
+ "split": _split,
+ "path": _pattern[0] if len(_pattern) == 1 else _pattern,
+ }
+ for _split, _pattern in data_files_to_dump.items()
+ ]
+ }
+ else:
+ metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
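+ # only the first commit carries the deletions; later commits just add the remaining shards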
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):
+ """Add column to Dataset.
+
+
+
+ Args:
+ name (`str`):
+ Column name.
+ column (`list` or `np.array`):
+ Column data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> more_text = ds["text"]
+ >>> ds.add_column(name="text_2", column=more_text)
+ Dataset({
+ features: ['text', 'label', 'text_2'],
+ num_rows: 1066
+ })
+ ```
+ """
+ column_table = InMemoryTable.from_pydict({name: column})
+ _check_column_names(self._data.column_names + column_table.column_names)
+ dataset = self.flatten_indices() if self._indices is not None else self
+ # Concatenate tables horizontally
+ table = concat_tables([dataset._data, column_table], axis=1)
+ # Update features
+ info = dataset.info.copy()
+ info.features.update(Features.from_arrow_schema(column_table.schema))
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ By default the index is done over the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ column (`str`):
+ The column of the vectors to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ By default it corresponds to `column`.
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`data-type`):
+ The dtype of the numpy arrays that are indexed.
+ Default is `np.float32`.
+
+ Example:
+
+ ```python
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
+ >>> ds_with_embeddings.add_faiss_index(column='embeddings')
+ >>> # query
+ >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ >>> # save index
+ >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> # load index
+ >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+ >>> # query
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ ```
+ """
+ with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
+ super().add_faiss_index(
+ column=column,
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+ return self
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ external_arrays (`np.array`):
+ If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`, *optional*):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`numpy.dtype`):
+ The dtype of the numpy arrays that are indexed. Default is `np.float32`.
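+
+ Example:
+
+ A minimal sketch; `embed` stands for any embedding function you provide (it is not part of this library):
+
+ ```python
+ >>> external_arrays = np.stack([embed(line) for line in ds['line']])
+ >>> ds.add_faiss_index_from_external_arrays(external_arrays=external_arrays, index_name='embeddings')
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ ```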
+ """
+ super().add_faiss_index_from_external_arrays(
+ external_arrays=external_arrays.astype(dtype),
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval. This is done in-place.
+
+ Args:
+ column (`str`):
+ The column of the documents to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`].
+ By default it corresponds to `column`.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host of where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port of where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_name (`str`, *optional*):
+ The elasticsearch index name used to create the index.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ Example:
+
+ ```python
+ >>> es_client = elasticsearch.Elasticsearch()
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
+ >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
+ ```
+ """
+ with self.formatted_as(type=None, columns=[column]):
+ super().add_elasticsearch_index(
+ column=column,
+ index_name=index_name,
+ host=host,
+ port=port,
+ es_client=es_client,
+ es_index_name=es_index_name,
+ es_index_config=es_index_config,
+ )
+ return self
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_item(self, item: dict, new_fingerprint: str):
+ """Add item to Dataset.
+
+ <Added version="1.7"/>
+
+ Args:
+ item (`dict`):
+ Item data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ >>> ds = ds.add_item(new_review)
+ >>> ds[-1]
+ {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ ```
+ """
+ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
+ # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
+ dset_features, item_features = _align_features(
+ [self._info.features, Features.from_arrow_schema(item_table.schema)]
+ )
+ # Cast to align the schemas of the tables and concatenate the tables
+ table = concat_tables(
+ [
+ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
+ item_table.cast(item_features.arrow_schema),
+ ]
+ )
+ if self._indices is None:
+ indices_table = None
+ else:
+ item_indices_array = pa.array([len(self._data)], type=pa.uint64())
+ item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
+ indices_table = concat_tables([self._indices, item_indices_table])
+ info = self.info.copy()
+ info.features.update(item_features)
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(
+ table,
+ info=info,
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=new_fingerprint,
+ )
+
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
+ """Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
+ This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
+ The alignment is done using the lowercase label names.
+
+ Args:
+ label2id (`dict`):
+ The label name to ID mapping to align the dataset with.
+ label_column (`str`):
+ The column name of labels to align on.
+
+ Example:
+
+ ```python
+ >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
+ >>> ds = load_dataset("glue", "mnli", split="train")
+ >>> # mapping to align with
+ >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
+ >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
+ ```
+
+ """
+ # Sanity checks
+ if label_column not in self._data.column_names:
+ raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
+
+ label_feature = self._info.features[label_column]
+ if not (
+ isinstance(label_feature, ClassLabel)
+ or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
+ ):
+ raise ValueError(
+ f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
+ )
+
+ # Sort input mapping by ID value to ensure the label names are aligned
+ label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
+ label_names = list(label2id.keys())
+ # Some label mappings use uppercase label names so we lowercase them during alignment
+ label2id = {k.lower(): v for k, v in label2id.items()}
+ int2str_function = (
+ label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
+ )
+
+ if isinstance(label_feature, ClassLabel):
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ int2str_function(label_id).lower() if label_id is not None else None
+ for label_id in batch[label_column]
+ ]
+ batch[label_column] = [
+ label2id[label_name] if label_name is not None else None for label_name in dset_label_names
+ ]
+ return batch
+
+ else:
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
+ for seq in batch[label_column]
+ ]
+ batch[label_column] = [
+ [label2id[label_name] if label_name is not None else None for label_name in seq]
+ for seq in dset_label_names
+ ]
+ return batch
+
+ features = self.features
+ features[label_column] = (
+ ClassLabel(num_classes=len(label_names), names=label_names)
+ if isinstance(label_feature, ClassLabel)
+ else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
+ )
+ return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
+
+
+def _concatenate_map_style_datasets(
+ dsets: List[Dataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+):
+ """
+ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`.
+ When you concatenate on axis 0, missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.Dataset]`): List of Datasets to concatenate.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_map_style_datasets([ds1, ds2])
+ ```
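+
+ To stack datasets column-wise instead (a sketch; assumes `ds1` and `ds2` have the same number of rows and disjoint column names):
+
+ ```py
+ >>> ds_wide = _concatenate_map_style_datasets([ds1, ds2], axis=1)
+ ```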
+ """
+ # Ignore datasets with no rows
+ if any(dset.num_rows > 0 for dset in dsets):
+ dsets = [dset for dset in dsets if dset.num_rows > 0]
+ else:
+ # Return first dataset if all datasets are empty
+ return dsets[0]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ if not all(dset.num_rows == dsets[0].num_rows for dset in dsets):
+ raise ValueError("Number of rows must match for all datasets")
+ _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])
+
+ # Find common format or reset format
+ format = dsets[0].format
+ if any(dset.format != format for dset in dsets):
+ format = {}
+ logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.")
+
+ def apply_offset_to_indices_table(table, offset):
+ if offset == 0:
+ return table
+ else:
+ array = table["indices"]
+ new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))
+ return InMemoryTable.from_arrays([new_array], names=["indices"])
+
+ # Concatenate indices if they exist
+ if any(dset._indices is not None for dset in dsets):
+ if axis == 0:
+ # Datasets with no indices tables are replaced with a dataset with an indices table in memory.
+ # Applying an offset to an indices table also brings the table in memory.
+ indices_tables = []
+ for i in range(len(dsets)):
+ if dsets[i]._indices is None:
+ dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i])))
+ indices_tables.append(dsets[i]._indices)
+
+ # An offset needs to be applied to the indices before concatenating
+ offset = 0
+ for i in range(len(dsets)):
+ indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)
+ offset += len(dsets[i]._data)
+
+ # Concatenate indices
+ indices_tables = [t for t in indices_tables if len(t) > 0]
+ if indices_tables:
+ indices_table = concat_tables(indices_tables)
+ else:
+ indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()}))
+ else:
+ if len(dsets) == 1:
+ indices_table = dsets[0]._indices
+ else:
+ for i in range(len(dsets)):
+ dsets[i] = dsets[i].flatten_indices()
+ indices_table = None
+ else:
+ indices_table = None
+
+ table = concat_tables([dset._data for dset in dsets], axis=axis)
+ if axis == 0:
+ features_list = _align_features([dset.features for dset in dsets])
+ else:
+ features_list = [dset.features for dset in dsets]
+ table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})
+
+ # Concatenate infos
+ if info is None:
+ info = DatasetInfo.from_merge([dset.info for dset in dsets])
+ fingerprint = update_fingerprint(
+ "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split}
+ )
+
+ # Make final concatenated dataset
+ concatenated_dataset = Dataset(
+ table,
+ info=info,
+ split=split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+ concatenated_dataset.set_format(**format)
+ return concatenated_dataset
+
+
+def _interleave_map_style_datasets(
+ datasets: List["Dataset"],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ **kwargs,
+) -> "Dataset":
+ """
+ Interleave several map-style datasets (sources) into a single map-style dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+ If `probabilities = None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+ Args:
+ datasets (`List[Dataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have max_length_datasets*nb_dataset samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+ **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
+
+ Output:
+ :class:`datasets.Dataset`
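+
+ Example:
+
+ A small sketch of the default behaviour (`first_exhausted`, no probabilities): the sources are cycled until the shortest one runs out.
+
+ ```py
+ >>> ds1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> ds2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+ >>> _interleave_map_style_datasets([ds1, ds2])["a"]
+ [0, 10, 1, 11, 2, 12]
+ ```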
+ """
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(
+ f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
+ )
+
+ # To interleave the datasets, we concatenate them and then we re-order the indices
+ concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
+
+ # Let's now build the indices to pass to .select()
+ lengths = [len(dset) for dset in datasets]
+ offsets = np.cumsum([0] + lengths[:-1])
+
+ # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
+ oversampling = stopping_strategy == "all_exhausted"
+
+ if probabilities is None and not oversampling:
+ # Undersampling situation with cycling between each source
+ # Example:: If lengths of the datasets are [3, 4, 5]
+ # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
+ # Note that we only have 3 examples per dataset since the first dataset ran out of examples
+
+ # Reasoning behind the following operation: keeping the min_length first indices of each dataset
+ # while offsetting in order to correspond to the right indices of the concatenated dataset
+ # and flattening to effectively interleave the datasets
+ indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
+ elif probabilities is None:
+ # Oversampling situation with cycling between each source
+ # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
+ # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
+
+ # Reasoning behind the following operation: for each dataset indices (i.e column) repeat the indices to have max_length indices per dataset
+ # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
+ indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
+
+ # We have to add the respective dataset offsets to the indices and flatten to effectively interleave the datasets
+ indices = (indices + offsets).flatten().tolist()
+
+ else:
+ # boolean array indicating whether the dataset at index i has been fully exhausted
+ is_exhausted = np.full(len(lengths), False)
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ bool_strategy_func = np.all if oversampling else np.any
+
+ def iter_random_indices():
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ rng = np.random.default_rng(seed)
+ while True:
+ yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
+
+ current_index = [0] * len(datasets)
+ indices = []
+ for source_idx in iter_random_indices():
+ # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
+ # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
+ if bool_strategy_func(is_exhausted):
+ # the stopping condition was reached, let's stop
+ break
+
+ # let's add the example at the current index of the `source_idx`-th dataset
+ indices.append(current_index[source_idx] + offsets[source_idx])
+ current_index[source_idx] += 1
+
+ # we've run out of examples for the current dataset, so let's update our boolean array and bring current_index back to 0
+ if current_index[source_idx] >= lengths[source_idx]:
+ is_exhausted[source_idx] = True
+ current_index[source_idx] = 0
+
+ return concatenated_datasets.select(indices, **kwargs)
+
+
+def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ Args:
+ dataset ([`Dataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`]: The dataset to be used on the node at rank `rank`.
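+
+ Example (a sketch): with `world_size=4`, rank 1 receives the second contiguous quarter of the dataset.
+
+ ```py
+ >>> part = _split_by_node_map_style_dataset(ds, rank=1, world_size=4)
+ ```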
+ """
+ return dataset.shard(num_shards=world_size, index=rank, contiguous=True)
+
+
+# This is outside Dataset.filter as it needs to be picklable for multiprocessing
+
+
+def get_indices_from_mask_function(
+ function: Callable,
+ batched: bool,
+ with_indices: bool,
+ with_rank: bool,
+ input_columns: Optional[Union[str, List[str]]],
+ indices_mapping: Optional[Table] = None,
+ *args,
+ **fn_kwargs,
+):
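+ # A sketch of the expected call shape when `batched=True` and `input_columns=None` (this helper is
+ # normally called internally by `Dataset.filter`, so the call below is illustrative only):
+ #   get_indices_from_mask_function(
+ #       lambda batch: [len(t) > 10 for t in batch["text"]],  # mask function: one bool per example
+ #       True, False, False, None, None,  # batched, with_indices, with_rank, input_columns, indices_mapping
+ #       {"text": ["short", "a much longer text"]}, [0, 1], 0,  # *args = (batch, indices, rank)
+ #   )
+ # returns {"indices": [1]}, i.e. the indices of the examples to keep.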
+ if batched:
+ # we extract indices and rank from args
+ *inputs, indices, rank = args
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices,)
+ if with_rank:
+ additional_args += (rank,)
+ mask = function(*inputs, *additional_args, **fn_kwargs)
+ else:
+ # we get batched data (to do fewer look-ups) but `function` only accepts one example
+ # therefore we need to call `function` on each example of the batch to get the mask
+ *inputs, indices, rank = args
+ mask = []
+ if input_columns is None:
+ # inputs only contains a batch of examples
+ batch: dict = inputs[0]
+ num_examples = len(batch[next(iter(batch.keys()))])
+ for i in range(num_examples):
+ example = {key: batch[key][i] for key in batch}
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(example, *additional_args, **fn_kwargs))
+ else:
+ # inputs is a list of columns
+ columns: List[List] = inputs
+ num_examples = len(columns[0])
+ for i in range(num_examples):
+ input = [column[i] for column in columns]
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(*input, *additional_args, **fn_kwargs))
+ indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
+ if indices_mapping is not None:
+ indices_array = pa.array(indices_array, type=pa.uint64())
+ indices_array = indices_mapping.column(0).take(indices_array)
+ indices_array = indices_array.to_pylist()
+ return {"indices": indices_array}
diff --git a/venv/lib/python3.10/site-packages/datasets/arrow_reader.py b/venv/lib/python3.10/site-packages/datasets/arrow_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3b830a6596c4d6df8ba16b4c54c89ca26c990e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/arrow_reader.py
@@ -0,0 +1,663 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Arrow ArrowReader."""
+
+import copy
+import math
+import os
+import re
+import shutil
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, List, Optional, Union
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+from tqdm.contrib.concurrent import thread_map
+
+from .download.download_config import DownloadConfig
+from .naming import _split_re, filenames_for_dataset_split
+from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import cached_path
+
+
+if TYPE_CHECKING:
+ from .info import DatasetInfo # noqa: F401
+ from .splits import Split, SplitInfo # noqa: F401
+
+
+logger = logging.get_logger(__name__)
+
+HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
+
+_SUB_SPEC_RE = re.compile(
+ rf"""
+^
+ (?P<split>{_split_re[1:-1]})
+ (\[
+ ((?P<from>-?\d+)
+ (?P<from_pct>%)?)?
+ :
+ ((?P<to>-?\d+)
+ (?P<to_pct>%)?)?
+ \])?(\((?P<rounding>[^\)]*)\))?
+$
+""", # remove ^ and $
+ re.X,
+)
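+# For example, "train[10%:20%](pct1_dropremainder)" yields split="train", from=10, from_pct="%",
+# to=20, to_pct="%" and rounding="pct1_dropremainder".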
+
+_ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
+
+
+class DatasetNotOnHfGcsError(ConnectionError):
+ """When you can't get the dataset from the Hf google cloud storage"""
+
+ pass
+
+
+class MissingFilesOnHfGcsError(ConnectionError):
+ """When some files are missing on the Hf google cloud storage"""
+
+ pass
+
+
+@dataclass(frozen=True)
+class FileInstructions:
+ """The file instructions associated with a split ReadInstruction.
+
+ Attributes:
+ num_examples: `int`, The total number of examples
+ file_instructions: List[dict(filename, skip, take)], the files information.
+ The filenames contain the relative path, not the absolute one.
+ skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ """
+
+ num_examples: int
+ file_instructions: List[dict]
+
+
+def make_file_instructions(
+ name: str,
+ split_infos: List["SplitInfo"],
+ instruction: Union[str, "ReadInstruction"],
+ filetype_suffix: Optional[str] = None,
+ prefix_path: Optional[str] = None,
+) -> FileInstructions:
+ """Returns instructions of the split dict.
+
+ Args:
+ name (`str`): Name of the dataset.
+ split_infos (`list` of `[SplitInfo]`): Dataset splits information.
+ instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset.
+ filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'.
+ prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name.
+
+ Returns:
+ [`FileInstructions`]
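+
+ Example:
+
+ A minimal sketch (the dataset name, path and split sizes are illustrative):
+
+ ```py
+ >>> split_infos = [SplitInfo(name="train", num_examples=100)]
+ >>> instructions = make_file_instructions("my_dataset", split_infos, "train[:20]", filetype_suffix="arrow", prefix_path="/cache")
+ >>> instructions.num_examples
+ 20
+ ```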
+ """
+ if not isinstance(name, str):
+ raise TypeError(f"Expected str 'name', but got: {type(name).__name__}")
+ elif not name:
+ raise ValueError("Expected non-empty str 'name'")
+ name2len = {info.name: info.num_examples for info in split_infos}
+ name2shard_lengths = {info.name: info.shard_lengths for info in split_infos}
+ name2filenames = {
+ info.name: filenames_for_dataset_split(
+ path=prefix_path,
+ dataset_name=name,
+ split=info.name,
+ filetype_suffix=filetype_suffix,
+ shard_lengths=name2shard_lengths[info.name],
+ )
+ for info in split_infos
+ }
+ if not isinstance(instruction, ReadInstruction):
+ instruction = ReadInstruction.from_spec(instruction)
+ # Create the absolute instruction (per split)
+ absolute_instructions = instruction.to_absolute(name2len)
+
+ # For each split, return the files instruction (skip/take)
+ file_instructions = []
+ num_examples = 0
+ for abs_instr in absolute_instructions:
+ split_length = name2len[abs_instr.splitname]
+ filenames = name2filenames[abs_instr.splitname]
+ shard_lengths = name2shard_lengths[abs_instr.splitname]
+ from_ = 0 if abs_instr.from_ is None else abs_instr.from_
+ to = split_length if abs_instr.to is None else abs_instr.to
+ if shard_lengths is None: # not sharded
+ for filename in filenames:
+ take = to - from_
+ if take == 0:
+ continue
+ num_examples += take
+ file_instructions.append({"filename": filename, "skip": from_, "take": take})
+ else: # sharded
+ index_start = 0 # Beginning (included) of moving window.
+ index_end = 0 # End (excluded) of moving window.
+ for filename, shard_length in zip(filenames, shard_lengths):
+ index_end += shard_length
+ if from_ < index_end and to > index_start: # There is something to take.
+ skip = from_ - index_start if from_ > index_start else 0
+ take = to - index_start - skip if to < index_end else -1
+ if take == 0:
+ continue
+ file_instructions.append({"filename": filename, "skip": skip, "take": take})
+ num_examples += shard_length - skip if take == -1 else take
+ index_start += shard_length
+ return FileInstructions(
+ num_examples=num_examples,
+ file_instructions=file_instructions,
+ )
+
+
+class BaseReader:
+ """
+ Build a Dataset object out of Instruction instance(s).
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ArrowReader.
+
+ Args:
+ path (str): path where the dataset files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ self._path: str = path
+ self._info: Optional["DatasetInfo"] = info
+ self._filetype_suffix: Optional[str] = None
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ raise NotImplementedError
+
+ def _read_files(self, files, in_memory=False) -> Table:
+ """Returns Dataset for given file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+ The filenames contain the absolute path, not relative.
+ skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ in_memory (bool, default False): Whether to copy the data in-memory.
+ """
+ if len(files) == 0 or not all(isinstance(f, dict) for f in files):
+ raise ValueError("please provide valid file information")
+ files = copy.deepcopy(files)
+ for f in files:
+ f["filename"] = os.path.join(self._path, f["filename"])
+
+ pa_tables = thread_map(
+ partial(self._get_table_from_filename, in_memory=in_memory),
+ files,
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset shards",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(files) <= 16 or None,
+ )
+ pa_tables = [t for t in pa_tables if len(t) > 0]
+ if not pa_tables and (self._info is None or self._info.features is None):
+ raise ValueError(
+ "Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
+ )
+ pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
+ pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
+ return pa_table
+
+ def get_file_instructions(self, name, instruction, split_infos):
+ """Return list of dict {'filename': str, 'skip': int, 'take': int}"""
+ file_instructions = make_file_instructions(
+ name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
+ )
+ files = file_instructions.file_instructions
+ return files
+
+ def read(
+ self,
+ name,
+ instructions,
+ split_infos,
+ in_memory=False,
+ ):
+ """Returns Dataset instance(s).
+
+ Args:
+ name (str): name of the dataset.
+ instructions (ReadInstruction): instructions to read.
+ Instructions can be a string, which will then be passed to the ReadInstruction
+ constructor as is.
+ split_infos (list of SplitInfo proto): the available splits for dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a single Dataset instance.
+ """
+
+ files = self.get_file_instructions(name, instructions, split_infos)
+ if not files:
+ msg = f'Instruction "{instructions}" corresponds to no data!'
+ raise ValueError(msg)
+ return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
+
+ def read_files(
+ self,
+ files: List[dict],
+ original_instructions: Union[None, "ReadInstruction", "Split"] = None,
+ in_memory=False,
+ ):
+ """Returns single Dataset instance for the set of file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+ The filenames contain the relative path, not the absolute one.
+ skip/take indicates which examples to read in the file: `ds.skip().take()`
+ original_instructions: store the original instructions used to build the dataset split in the dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a Dataset instance.
+ """
+ # Prepend path to filename
+ pa_table = self._read_files(files, in_memory=in_memory)
+ # If original_instructions is not None, convert it to a human-readable NamedSplit
+ if original_instructions is not None:
+ from .splits import Split # noqa
+
+ split = Split(str(original_instructions))
+ else:
+ split = None
+ dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
+ return dataset_kwargs
+
+ @deprecated()
+ def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir):
+ """
+ Download the dataset files from the Hf GCS
+
+ Args:
+ download_config: `DownloadConfig`, the download configuration used to download the files
+ relative_data_dir: `str`, the relative directory of the remote files from
+ the `datasets` directory on GCS.
+
+ """
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ try:
+ remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
+ downloaded_dataset_info = cached_path(
+ remote_dataset_info.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json"))
+ if self._info is not None:
+ self._info.update(self._info.from_directory(self._path))
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ try:
+ for split in self._info.splits:
+ file_instructions = self.get_file_instructions(
+ name=self._info.builder_name,
+ instruction=split,
+ split_infos=self._info.splits.values(),
+ )
+ for file_instruction in file_instructions:
+ file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path))
+ remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download)
+ downloaded_prepared_filename = cached_path(
+ remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_prepared_filename, file_instruction["filename"])
+ except FileNotFoundError as err:
+ raise MissingFilesOnHfGcsError(err) from None
+
+
+class ArrowReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ArrowReader.
+
+ Args:
+ path (str): path where Arrow files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "arrow"
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+ if take == -1:
+ take = len(table) - skip
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(table)):
+ table = table.slice(skip, take)
+ return table
+
+ @staticmethod
+ def read_table(filename, in_memory=False) -> Table:
+ """
+ Read table from file.
+
+ Args:
+ filename (str): File name of the table.
+ in_memory (bool, default=False): Whether to copy the data in-memory.
+
+ Returns:
+ pyarrow.Table
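+
+ Example (a sketch; the path is illustrative):
+
+ ```py
+ >>> table = ArrowReader.read_table("/cache/my_dataset-train.arrow")  # memory-mapped
+ >>> table_in_memory = ArrowReader.read_table("/cache/my_dataset-train.arrow", in_memory=True)
+ ```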
+ """
+ table_cls = InMemoryTable if in_memory else MemoryMappedTable
+ return table_cls.from_file(filename)
+
+
+class ParquetReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses memory mapping on parquet files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ParquetReader.
+
+ Args:
+ path (str): path where parquet files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "parquet"
+
+ def _get_table_from_filename(self, filename_skip_take, **kwargs):
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ # Parquet read_table always loads data in memory, independently of memory_map
+ pa_table = pq.read_table(filename, memory_map=True)
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
+ pa_table = pa_table.slice(skip, take)
+ return pa_table
+
+
+@dataclass(frozen=True)
+class _AbsoluteInstruction:
+ """A machine friendly slice: defined absolute positive boundaries."""
+
+ splitname: str
+ from_: int # uint (starting index).
+ to: int # uint (ending index).
+
+
+@dataclass(frozen=True)
+class _RelativeInstruction:
+ """Represents a single parsed slicing instruction, can use % and negatives."""
+
+ splitname: str
+ from_: Optional[int] = None # int (starting index) or None if no lower boundary.
+ to: Optional[int] = None # int (ending index) or None if no upper boundary.
+ unit: Optional[str] = None
+ rounding: Optional[str] = None
+
+ def __post_init__(self):
+ if self.unit is not None and self.unit not in ["%", "abs"]:
+ raise ValueError("unit must be either % or abs")
+ if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
+ raise ValueError("rounding must be either closest or pct1_dropremainder")
+ if self.unit != "%" and self.rounding is not None:
+ raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
+ if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ if self.unit == "%" and self.to is not None and abs(self.to) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ # Update via __dict__ due to instance being "frozen"
+ self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding
+
+
+def _str_to_read_instruction(spec):
+ """Returns ReadInstruction for given string."""
+ res = _SUB_SPEC_RE.match(spec)
+ if not res:
+ raise ValueError(f"Unrecognized instruction format: {spec}")
+ unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
+ return ReadInstruction(
+ split_name=res.group("split"),
+ rounding=res.group("rounding"),
+ from_=int(res.group("from")) if res.group("from") else None,
+ to=int(res.group("to")) if res.group("to") else None,
+ unit=unit,
+ )
+
+
+def _pct_to_abs_pct1(boundary, num_examples):
+ # Using math.trunc here, since -99.5% should give -99%, not -100%.
+ if num_examples < 100:
+ msg = (
+ 'Using "pct1_dropremainder" rounding on a split with less than 100 '
+ "elements is forbidden: it always results in an empty dataset."
+ )
+ raise ValueError(msg)
+ return boundary * math.trunc(num_examples / 100.0)
+
+
+def _pct_to_abs_closest(boundary, num_examples):
+ return int(round(boundary * num_examples / 100.0))
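+# For example, on a split of 150 examples, a boundary of 50% maps to 75 with "closest" rounding
+# (round(50 * 150 / 100)) but to 50 with "pct1_dropremainder" (50 * math.trunc(150 / 100)).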
+
+
+def _rel_to_abs_instr(rel_instr, name2len):
+ """Returns _AbsoluteInstruction instance for given RelativeInstruction.
+
+ Args:
+ rel_instr: RelativeInstruction instance.
+ name2len: dict {split_name: num_examples}.
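+
+ For example (a sketch; percent boundaries default to "closest" rounding):
+
+ >>> _rel_to_abs_instr(_RelativeInstruction("train", None, 33, "%"), {"train": 100})
+ _AbsoluteInstruction(splitname='train', from_=0, to=33)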
+ """
+ pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
+ split = rel_instr.splitname
+ if split not in name2len:
+ raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
+ num_examples = name2len[split]
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ if rel_instr.unit == "%":
+ from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
+ to = num_examples if to is None else pct_to_abs(to, num_examples)
+ else:
+ from_ = 0 if from_ is None else from_
+ to = num_examples if to is None else to
+ if from_ < 0:
+ from_ = max(num_examples + from_, 0)
+ if to < 0:
+ to = max(num_examples + to, 0)
+ from_ = min(from_, num_examples)
+ to = min(to, num_examples)
+ return _AbsoluteInstruction(split, from_, to)
+
+
+class ReadInstruction:
+ """Reading instruction for a dataset.
+
+ Examples::
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%'))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%]+train[1:-1]'))
+ ds = datasets.load_dataset('mnist', split=(
+ datasets.ReadInstruction('test', to=33, unit='%') +
+ datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%](pct1_dropremainder)'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
+
+ # 10-fold validation:
+ tests = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
+ for k in range(0, 100, 10)])
+ trains = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
+ for k in range(0, 100, 10)])
+
+ """
+
+ def _init(self, relative_instructions):
+ # Private initializer.
+ self._relative_instructions = relative_instructions
+
+ @classmethod
+ def _read_instruction_from_relative_instructions(cls, relative_instructions):
+ """Returns ReadInstruction obj initialized with relative_instructions."""
+ # Use __new__ to bypass __init__, which is used by the public API and not convenient here.
+ result = cls.__new__(cls)
+ result._init(relative_instructions) # pylint: disable=protected-access
+ return result
+
+ def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
+ """Initialize ReadInstruction.
+
+ Args:
+ split_name (str): name of the split to read. Eg: 'train'.
+ rounding (str, optional): The rounding behaviour to use when percent slicing is
+ used. Ignored when slicing with absolute indices.
+ Possible values:
+ - 'closest' (default): The specified percentages are rounded to the
+ closest value. Use this if you want specified percents to be as
+ exact as possible.
+ - 'pct1_dropremainder': the specified percentages are treated as
+ multiple of 1%. Use this option if you want consistency. Eg:
+ len(5%) == 5 * len(1%).
+ Using this option, one might not be able to use the full set of
+ examples, if the number of those is not a multiple of 100.
+ from_ (int):
+ to (int): alternative way of specifying slicing boundaries. If any of
+ the {from_, to, unit} arguments is used, slicing cannot be specified as
+ a string.
+ unit (str): optional, one of:
+ '%': to set the slicing unit as percents of the split size.
+ 'abs': to set the slicing unit as absolute numbers.
+ """
+ # This constructor is not always called. See factory method
+ # `_read_instruction_from_relative_instructions`. Common init instructions
+ # MUST be placed in the _init method.
+ self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
+
+ @classmethod
+ def from_spec(cls, spec):
+ """Creates a `ReadInstruction` instance out of a string spec.
+
+ Args:
+ spec (`str`):
+ Split(s) + optional slice(s) to read + optional rounding
+ if percents are used as the slicing unit. A slice can be specified,
+ using absolute numbers (`int`) or percentages (`int`).
+
+ Examples:
+
+ ```
+ test: test split.
+ test + validation: test split + validation split.
+ test[10:]: test split, minus its first 10 records.
+ test[:10%]: first 10% records of test split.
+ test[:20%](pct1_dropremainder): first 20% records, rounded with the pct1_dropremainder rounding.
+ test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
+ ```
+
+ Returns:
+ ReadInstruction instance.
+ """
+ spec = str(spec) # Need to convert to str in case of NamedSplit instance.
+ subs = _ADDITION_SEP_RE.split(spec)
+ if not subs:
+ raise ValueError(f"No instructions could be built out of {spec}")
+ instruction = _str_to_read_instruction(subs[0])
+ return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
+
+ def to_spec(self):
+ rel_instr_specs = []
+ for rel_instr in self._relative_instructions:
+ rel_instr_spec = rel_instr.splitname
+ if rel_instr.from_ is not None or rel_instr.to is not None:
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ unit = rel_instr.unit
+ rounding = rel_instr.rounding
+ unit = unit if unit == "%" else ""
+ from_ = str(from_) + unit if from_ is not None else ""
+ to = str(to) + unit if to is not None else ""
+ slice_str = f"[{from_}:{to}]"
+ rounding_str = (
+ f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
+ )
+ rel_instr_spec += slice_str + rounding_str
+ rel_instr_specs.append(rel_instr_spec)
+ return "+".join(rel_instr_specs)
+
+ def __add__(self, other):
+ """Returns a new ReadInstruction obj, result of appending other to self."""
+ if not isinstance(other, ReadInstruction):
+ msg = "ReadInstruction can only be added to another ReadInstruction obj."
+ raise TypeError(msg)
+ self_ris = self._relative_instructions
+ other_ris = other._relative_instructions # pylint: disable=protected-access
+ if (
+ self_ris[0].unit != "abs"
+ and other_ris[0].unit != "abs"
+ and self._relative_instructions[0].rounding != other_ris[0].rounding
+ ):
+ raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
+ return self._read_instruction_from_relative_instructions(self_ris + other_ris)
+
+ def __str__(self):
+ return self.to_spec()
+
+ def __repr__(self):
+ return f"ReadInstruction({self._relative_instructions})"
+
+ def to_absolute(self, name2len):
+ """Translate instruction into a list of absolute instructions.
+
+ Those absolute instructions are then to be added together.
+
+ Args:
+ name2len (`dict`):
+ Associating split names to number of examples.
+
+ Returns:
+ list of _AbsoluteInstruction instances (corresponds to the + in spec).
+ """
+ return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
diff --git a/venv/lib/python3.10/site-packages/datasets/arrow_writer.py b/venv/lib/python3.10/site-packages/datasets/arrow_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea12d094e6316c8537e6f3cb556b5fc20b5cd92d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/arrow_writer.py
@@ -0,0 +1,746 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""To write records into Arrow or Parquet files."""
+
+import errno
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+import fsspec
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+from fsspec.core import url_to_fs
+
+from . import config
+from .features import Features, Image, Value
+from .features.features import (
+ FeatureType,
+ _ArrayXDExtensionType,
+ cast_to_python_objects,
+ generate_from_arrow_type,
+ get_nested_type,
+ list_of_np_array_to_pyarrow_listarray,
+ numpy_to_pyarrow_listarray,
+ to_pyarrow_listarray,
+)
+from .filesystems import is_remote_filesystem
+from .info import DatasetInfo
+from .keyhash import DuplicatedKeysError, KeyHasher
+from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import hash_url_to_filename
+from .utils.py_utils import asdict, first_non_null_value
+
+
+logger = logging.get_logger(__name__)
+
+type_ = type # keep python's type function
+
+
+class SchemaInferenceError(ValueError):
+ pass
+
+
+class TypedSequence:
+ """
+ This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
+
+ More specifically it adds several features:
+ - Support extension types like ``datasets.features.Array2DExtensionType``:
+ By default pyarrow arrays don't return extension arrays. One has to call
+ ``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
+ in order to get an extension array.
+ - Support for ``try_type`` parameter that can be used instead of ``type``:
+ When an array is transformed, we like to keep the same type as before if possible.
+ For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
+ of each column by default.
+ - Better error message when a pyarrow array overflows.
+
+ Example::
+
+ from datasets.features import Array2D, Array2DExtensionType, Value
+ from datasets.arrow_writer import TypedSequence
+ import pyarrow as pa
+
+ arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
+ assert arr.type == pa.string()
+
+ arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
+ assert arr.type == Array2DExtensionType((1, 3), "int64")
+
+ table = pa.Table.from_pydict({
+ "image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
+ })
+ assert table["image"].type == Array2DExtensionType((1, 3), "int64")
+
+ """
+
+ def __init__(
+ self,
+ data: Iterable,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ # assert type is None or try_type is None,
+ if type is not None and try_type is not None:
+ raise ValueError("You cannot specify both type and try_type")
+ # set attributes
+ self.data = data
+ self.type = type
+ self.try_type = try_type # is ignored if it doesn't match the data
+ self.optimized_int_type = optimized_int_type
+ # when trying a type (is ignored if data is not compatible)
+ self.trying_type = self.try_type is not None
+ self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
+ # used to get back the inferred type after __arrow_array__() is called once
+ self._inferred_type = None
+
+ def get_inferred_type(self) -> FeatureType:
+ """Return the inferred feature type.
+ This is done by converting the sequence to an Arrow array, and getting the corresponding
+ feature type.
+
+ Since building the Arrow array can be expensive, the value of the inferred type is cached
+ as soon as pa.array is called on the typed sequence.
+
+ Returns:
+ FeatureType: inferred feature type of the sequence.
+ """
+ if self._inferred_type is None:
+ self._inferred_type = generate_from_arrow_type(pa.array(self).type)
+ return self._inferred_type
+
+ @staticmethod
+ def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
+ """Implement type inference for custom objects like PIL.Image.Image -> Image type.
+
+ This function is only used for custom python objects that can't be directly passed to build
+ an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
+ that it can be passed to an Arrow array.
+
+ Args:
+ data (Iterable): array of data to infer the type, e.g. a list of PIL images.
+
+ Returns:
+ Tuple[Iterable, Optional[FeatureType]]: a tuple with:
+ - the (possibly encoded) array, if the inferred feature type requires encoding
+ - the inferred feature type if the array is made of supported custom objects like
+ PIL images, else None.
+ """
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
+ import PIL.Image
+
+ non_null_idx, non_null_value = first_non_null_value(data)
+ if isinstance(non_null_value, PIL.Image.Image):
+ return [Image().encode_example(value) if value is not None else None for value in data], Image()
+ return data, None
+
+ def __arrow_array__(self, type: Optional[pa.DataType] = None):
+ """This function is called when calling pa.array(typed_sequence)"""
+
+ if type is not None:
+ raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
+ del type # make sure we don't use it
+ data = self.data
+ # automatic type inference for custom objects
+ if self.type is None and self.try_type is None:
+ data, self._inferred_type = self._infer_custom_type_and_encode(data)
+ if self._inferred_type is None:
+ type = self.try_type if self.trying_type else self.type
+ else:
+ type = self._inferred_type
+ pa_type = get_nested_type(type) if type is not None else None
+ optimized_int_pa_type = (
+ get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
+ )
+ trying_cast_to_python_objects = False
+ try:
+ # custom pyarrow types
+ if isinstance(pa_type, _ArrayXDExtensionType):
+ storage = to_pyarrow_listarray(data, pa_type)
+ return pa.ExtensionArray.from_storage(pa_type, storage)
+
+ # efficient np array to pyarrow array
+ if isinstance(data, np.ndarray):
+ out = numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
+ out = list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ # use smaller integer precisions if possible
+ if self.trying_int_optimization:
+ if pa.types.is_int64(out.type):
+ out = out.cast(optimized_int_pa_type)
+ elif pa.types.is_list(out.type):
+ if pa.types.is_int64(out.type.value_type):
+ out = array_cast(out, pa.list_(optimized_int_pa_type))
+ elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
+ out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
+ # otherwise we can finally use the user's type
+ elif type is not None:
+ # We use cast_array_to_feature to support casting to custom types like Audio and Image
+ # Also, when trying type "string", we don't want to convert integers or floats to "string".
+ # We only do it if trying_type is False - since this is what the user asks for.
+ out = cast_array_to_feature(
+ out, type, allow_primitive_to_str=not self.trying_type, allow_decimal_to_str=not self.trying_type
+ )
+ return out
+ except (
+ TypeError,
+ pa.lib.ArrowInvalid,
+ pa.lib.ArrowNotImplementedError,
+ ) as e: # handle type errors and overflows
+ # Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
+ if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
+ raise
+
+ if self.trying_type:
+ try: # second chance
+ if isinstance(data, np.ndarray):
+ return numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
+ return list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ except pa.lib.ArrowInvalid as e:
+ if "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(
+ f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
+ )
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(
+ cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
+ )
+ if type is not None:
+ out = cast_array_to_feature(
+ out, type, allow_primitive_to_str=True, allow_decimal_to_str=True
+ )
+ return out
+ else:
+ raise
+ elif "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
+ if type is not None:
+ out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True)
+ return out
+ else:
+ raise
+
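+# Illustrative sketch (not part of the upstream module): pa.array() delegates to
+# TypedSequence.__arrow_array__ above, so a typed column can be built like this,
+# assuming the Value feature type from datasets.features:
+#
+#   >>> import pyarrow as pa
+#   >>> from datasets.features import Value
+#   >>> arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+#   >>> print(arr.type)
+#   int32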
+
+class OptimizedTypedSequence(TypedSequence):
+ def __init__(
+ self,
+ data,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ col: Optional[str] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ optimized_int_type_by_col = {
+ "attention_mask": Value("int8"), # binary tensor
+ "special_tokens_mask": Value("int8"),
+ "input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
+ "token_type_ids": Value(
+ "int8"
+ ), # binary mask; some (XLNetModel) use an additional token represented by a 2
+ }
+ if type is None and try_type is None:
+ optimized_int_type = optimized_int_type_by_col.get(col, None)
+ super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
+
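+# Illustrative sketch (assumption, not upstream code): for the token columns listed
+# above, OptimizedTypedSequence downcasts plain Python ints automatically:
+#
+#   >>> import pyarrow as pa
+#   >>> arr = pa.array(OptimizedTypedSequence([[0, 1, 1, 0]], col="attention_mask"))
+#   >>> print(arr.type)
+#   list<item: int8>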
+
+class ArrowWriter:
+ """Shuffles and writes Examples to Arrow files."""
+
+ _WRITER_CLASS = pa.RecordBatchStreamWriter
+
+ def __init__(
+ self,
+ schema: Optional[pa.Schema] = None,
+ features: Optional[Features] = None,
+ path: Optional[str] = None,
+ stream: Optional[pa.NativeFile] = None,
+ fingerprint: Optional[str] = None,
+ writer_batch_size: Optional[int] = None,
+ hash_salt: Optional[str] = None,
+ check_duplicates: Optional[bool] = False,
+ disable_nullable: bool = False,
+ update_features: bool = False,
+ with_metadata: bool = True,
+ unit: str = "examples",
+ embed_local_files: bool = False,
+ storage_options: Optional[dict] = None,
+ ):
+ if path is None and stream is None:
+ raise ValueError("At least one of path and stream must be provided.")
+ if features is not None:
+ self._features = features
+ self._schema = None
+ elif schema is not None:
+ self._schema: pa.Schema = schema
+ self._features = Features.from_arrow_schema(self._schema)
+ else:
+ self._features = None
+ self._schema = None
+
+ if hash_salt is not None:
+ # Create KeyHasher instance using split name as hash salt
+ self._hasher = KeyHasher(hash_salt)
+ else:
+ self._hasher = KeyHasher("")
+
+ self._check_duplicates = check_duplicates
+ self._disable_nullable = disable_nullable
+
+ if stream is None:
+ fs, path = url_to_fs(path, **(storage_options or {}))
+ self._fs: fsspec.AbstractFileSystem = fs
+ self._path = path if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(path)
+ self.stream = self._fs.open(path, "wb")
+ self._closable_stream = True
+ else:
+ self._fs = None
+ self._path = None
+ self.stream = stream
+ self._closable_stream = False
+
+ self.fingerprint = fingerprint
+ self.disable_nullable = disable_nullable
+ self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
+ self.update_features = update_features
+ self.with_metadata = with_metadata
+ self.unit = unit
+ self.embed_local_files = embed_local_files
+
+ self._num_examples = 0
+ self._num_bytes = 0
+ self.current_examples: List[Tuple[Dict[str, Any], str]] = []
+ self.current_rows: List[pa.Table] = []
+ self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
+ self.hkey_record = []
+
+ def __len__(self):
+ """Return the number of written and staged examples."""
+ return self._num_examples + len(self.current_examples) + len(self.current_rows)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def close(self):
+ # Try closing the writer if it is open; closing an already closed writer raises pyarrow.lib.ArrowInvalid: Invalid operation on closed file
+ if self.pa_writer: # it might be None
+ try:
+ self.pa_writer.close()
+ except Exception: # pyarrow.lib.ArrowInvalid, OSError
+ pass
+ if self._closable_stream and not self.stream.closed:
+ self.stream.close() # This also closes self.pa_writer if it is opened
+
+ def _build_writer(self, inferred_schema: pa.Schema):
+ schema = self.schema
+ inferred_features = Features.from_arrow_schema(inferred_schema)
+ if self._features is not None:
+ if self.update_features: # keep original features if they match, or update them
+ fields = {field.name: field for field in self._features.type}
+ for inferred_field in inferred_features.type:
+ name = inferred_field.name
+ if name in fields:
+ if inferred_field == fields[name]:
+ inferred_features[name] = self._features[name]
+ self._features = inferred_features
+ schema: pa.Schema = inferred_schema
+ else:
+ self._features = inferred_features
+ schema: pa.Schema = inferred_features.arrow_schema
+ if self.disable_nullable:
+ schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
+ if self.with_metadata:
+ schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
+ else:
+ schema = schema.with_metadata({})
+ self._schema = schema
+ self.pa_writer = self._WRITER_CLASS(self.stream, schema)
+
+ @property
+ def schema(self):
+ _schema = (
+ self._schema
+ if self._schema is not None
+ else (pa.schema(self._features.type) if self._features is not None else None)
+ )
+ if self._disable_nullable and _schema is not None:
+ _schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
+ return _schema if _schema is not None else []
+
+ @staticmethod
+ def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
+ info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
+ info_as_dict = asdict(info)
+ metadata = {}
+ metadata["info"] = {key: info_as_dict[key] for key in info_keys}
+ if fingerprint is not None:
+ metadata["fingerprint"] = fingerprint
+ return {"huggingface": json.dumps(metadata)}
+
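+ # Illustrative sketch (not upstream code): the metadata built above is stored under a
+ # single "huggingface" key as a JSON string in the Arrow schema metadata, e.g.:
+ #
+ #   >>> import json
+ #   >>> from datasets.info import DatasetInfo
+ #   >>> from datasets.features import Features, Value
+ #   >>> md = ArrowWriter._build_metadata(DatasetInfo(features=Features({"text": Value("string")})), fingerprint="abc")
+ #   >>> sorted(json.loads(md["huggingface"]))
+ #   ['fingerprint', 'info']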
+ def write_examples_on_file(self):
+ """Write stored examples from the write-pool of examples. It makes a table out of the examples and writes it."""
+ if not self.current_examples:
+ return
+ # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in examples_cols]
+ extra_cols = [col for col in examples_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(self.current_examples[0][0])
+ batch_examples = {}
+ for col in cols:
+ # We use row[0][col] since current_examples contains (example, key) tuples.
+ # Moreover, examples could be Arrow arrays of 1 element.
+ # This can happen in `.map()` when we want to re-write the same Arrow data
+ if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
+ arrays = [row[0][col] for row in self.current_examples]
+ arrays = [
+ chunk
+ for array in arrays
+ for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
+ ]
+ batch_examples[col] = pa.concat_arrays(arrays)
+ else:
+ batch_examples[col] = [
+ row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
+ for row in self.current_examples
+ ]
+ self.write_batch(batch_examples=batch_examples)
+ self.current_examples = []
+
+ def write_rows_on_file(self):
+ """Write stored rows from the write-pool of rows. It concatenates the single-row tables and writes the resulting table."""
+ if not self.current_rows:
+ return
+ table = pa.concat_tables(self.current_rows)
+ self.write_table(table)
+ self.current_rows = []
+
+ def write(
+ self,
+ example: Dict[str, Any],
+ key: Optional[Union[str, int, bytes]] = None,
+ writer_batch_size: Optional[int] = None,
+ ):
+ """Add a given (example, key) pair to the write-pool of examples, which is written to file.
+
+ Args:
+ example: the Example to add.
+ key: Optional, a unique identifier (str, int or bytes) associated with each example.
+ """
+ # Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
+ if self._check_duplicates:
+ # Create unique hash from key and store as (key, example) pairs
+ hash = self._hasher.hash(key)
+ self.current_examples.append((example, hash))
+ # Maintain record of keys and their respective hashes for checking duplicates
+ self.hkey_record.append((hash, key))
+ else:
+ # Store example as a tuple so as to keep the structure of `self.current_examples` uniform
+ self.current_examples.append((example, ""))
+
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+ # Re-initializing to empty list for next batch
+ self.hkey_record = []
+
+ self.write_examples_on_file()
+
+ def check_duplicate_keys(self):
+ """Raise an error if duplicate keys are found in a batch."""
+ tmp_record = set()
+ for hash, key in self.hkey_record:
+ if hash in tmp_record:
+ duplicate_key_indices = [
+ str(self._num_examples + index)
+ for index, (duplicate_hash, _) in enumerate(self.hkey_record)
+ if duplicate_hash == hash
+ ]
+
+ raise DuplicatedKeysError(key, duplicate_key_indices)
+ else:
+ tmp_record.add(hash)
+
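+ # Illustrative sketch (assumption, not upstream code; the file name is made up):
+ # with check_duplicates=True, writing two examples with the same key raises
+ # DuplicatedKeysError once the batch is checked:
+ #
+ #   >>> writer = ArrowWriter(path="keyed.arrow", check_duplicates=True, hash_salt="train")
+ #   >>> writer.write({"text": "a"}, key=0)
+ #   >>> writer.write({"text": "b"}, key=0)
+ #   >>> writer.finalize()  # raises DuplicatedKeysError for the duplicated key 0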
+ def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
+ """Add a given single-row Table to the write-pool of rows, which is written to file.
+
+ Args:
+ row: the row to add.
+ """
+ if len(row) != 1:
+ raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
+ self.current_rows.append(row)
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
+ self.write_rows_on_file()
+
+ def write_batch(
+ self,
+ batch_examples: Dict[str, List],
+ writer_batch_size: Optional[int] = None,
+ ):
+ """Write a batch of examples to file.
+ Ignores the batch if it appears to be empty,
+ preventing a potential schema update of unknown types.
+
+ Args:
+ batch_examples: the batch of examples to add.
+ """
+ if batch_examples and len(next(iter(batch_examples.values()))) == 0:
+ return
+ features = None if self.pa_writer is None and self.update_features else self._features
+ try_features = self._features if self.pa_writer is None and self.update_features else None
+ arrays = []
+ inferred_features = Features()
+ # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in batch_cols]
+ extra_cols = [col for col in batch_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(batch_examples)
+ for col in cols:
+ col_values = batch_examples[col]
+ col_type = features[col] if features else None
+ if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
+ array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
+ arrays.append(array)
+ inferred_features[col] = generate_from_arrow_type(col_values.type)
+ else:
+ col_try_type = try_features[col] if try_features is not None and col in try_features else None
+ typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
+ arrays.append(pa.array(typed_sequence))
+ inferred_features[col] = typed_sequence.get_inferred_type()
+ schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
+ pa_table = pa.Table.from_arrays(arrays, schema=schema)
+ self.write_table(pa_table, writer_batch_size)
+
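+ # Minimal sketch (not upstream code; the file name is made up): write_batch takes
+ # columnar data directly, so a dict of equal-length lists is enough:
+ #
+ #   >>> writer = ArrowWriter(path="batch.arrow")
+ #   >>> writer.write_batch({"text": ["foo", "bar"], "label": [0, 1]})
+ #   >>> num_examples, num_bytes = writer.finalize()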
+ def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
+ """Write a Table to file.
+
+ Args:
+ pa_table: the Table to add.
+ """
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if self.pa_writer is None:
+ self._build_writer(inferred_schema=pa_table.schema)
+ pa_table = pa_table.combine_chunks()
+ pa_table = table_cast(pa_table, self._schema)
+ if self.embed_local_files:
+ pa_table = embed_table_storage(pa_table)
+ self._num_bytes += pa_table.nbytes
+ self._num_examples += pa_table.num_rows
+ self.pa_writer.write_table(pa_table, writer_batch_size)
+
+ def finalize(self, close_stream=True):
+ self.write_rows_on_file()
+ # In case fewer examples than writer_batch_size were staged but the user calls finalize()
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+ # Re-initializing to empty list for next batch
+ self.hkey_record = []
+ self.write_examples_on_file()
+ # If schema is known, infer features even if no examples were written
+ if self.pa_writer is None and self.schema:
+ self._build_writer(self.schema)
+ if self.pa_writer is not None:
+ self.pa_writer.close()
+ self.pa_writer = None
+ if close_stream:
+ self.stream.close()
+ else:
+ if close_stream:
+ self.stream.close()
+ raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
+ logger.debug(
+ f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
+ )
+ return self._num_examples, self._num_bytes
+
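+# Illustrative end-to-end sketch (not part of the upstream module; feature and file
+# names are made up): writing examples one by one with explicit features and reading
+# back the counters returned by finalize():
+#
+#   >>> from datasets.features import Features, Value
+#   >>> features = Features({"text": Value("string"), "label": Value("int64")})
+#   >>> writer = ArrowWriter(features=features, path="my_split.arrow")
+#   >>> writer.write({"text": "hello", "label": 0})
+#   >>> writer.write({"text": "world", "label": 1})
+#   >>> num_examples, num_bytes = writer.finalize()
+#   >>> num_examples
+#   2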
+
+class ParquetWriter(ArrowWriter):
+ _WRITER_CLASS = pq.ParquetWriter
+
+
+class BeamWriter:
+ """
+ Shuffles and writes Examples to Arrow files.
+ The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
+ """
+
+ def __init__(
+ self,
+ features: Optional[Features] = None,
+ schema: Optional[pa.Schema] = None,
+ path: Optional[str] = None,
+ namespace: Optional[str] = None,
+ cache_dir: Optional[str] = None,
+ ):
+ if features is None and schema is None:
+ raise ValueError("At least one of features and schema must be provided.")
+ if path is None:
+ raise ValueError("Path must be provided.")
+
+ if features is not None:
+ self._features: Features = features
+ self._schema: pa.Schema = features.arrow_schema
+ else:
+ self._schema: pa.Schema = schema
+ self._features: Features = Features.from_arrow_schema(schema)
+
+ self._path = path
+ self._parquet_path = os.path.splitext(path)[0] # remove extension
+ self._namespace = namespace or "default"
+ self._num_examples = None
+ self._cache_dir = cache_dir or config.HF_DATASETS_CACHE
+
+ def write_from_pcollection(self, pcoll_examples):
+ """Add the final steps of the beam pipeline: write to parquet files."""
+ import apache_beam as beam
+
+ def inc_num_examples(example):
+ beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
+
+ # count examples
+ _ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
+
+ # save dataset
+ return (
+ pcoll_examples
+ | "Get values" >> beam.Values()
+ | "Save to parquet"
+ >> beam.io.parquetio.WriteToParquet(
+ self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet"
+ )
+ )
+
+ def finalize(self, metrics_query_result: dict):
+ """
+ Run after the pipeline has finished.
+ It converts the resulting parquet files to arrow and completes the info from the pipeline metrics.
+
+ Args:
+ metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
+ that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
+ """
+
+ # Beam FileSystems require the system's path separator in the older versions
+ fs, parquet_path = url_to_fs(self._parquet_path)
+ parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path)
+
+ shards = fs.glob(parquet_path + "*.parquet")
+ num_bytes = sum(fs.sizes(shards))
+ shard_lengths = get_parquet_lengths(shards)
+
+ # Convert to arrow
+ if self._path.endswith(".arrow"):
+ logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
+ try: # stream conversion
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ with fs.open(shard, "rb") as source:
+ with fs.open(shard.replace(".parquet", ".arrow"), "wb") as destination:
+ shard_num_bytes, _ = parquet_to_arrow(source, destination)
+ num_bytes += shard_num_bytes
+ except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
+ if e.errno != errno.EPIPE: # not a broken pipe
+ raise
+ logger.warning(
+ "Broken Pipe during stream conversion from parquet to arrow. Using local conversion instead"
+ )
+ local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
+ os.makedirs(local_convert_dir, exist_ok=True)
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
+ fs.download(shard, local_parquet_path)
+ local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
+ shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
+ num_bytes += shard_num_bytes
+ remote_arrow_path = shard.replace(".parquet", ".arrow")
+ fs.upload(local_arrow_path, remote_arrow_path)
+
+ # Save metrics
+ counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
+ self._num_examples = counters_dict["num_examples"]
+ self._num_bytes = num_bytes
+ self._shard_lengths = shard_lengths
+ return self._num_examples, self._num_bytes
+
+
+def get_parquet_lengths(sources) -> List[int]:
+ shard_lengths = []
+ for source in hf_tqdm(sources, unit="parquet files"):
+ parquet_file = pa.parquet.ParquetFile(source)
+ shard_lengths.append(parquet_file.metadata.num_rows)
+ return shard_lengths
+
+
+def parquet_to_arrow(source, destination) -> Tuple[int, int]:
+ """Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
+ stream = None if isinstance(destination, str) else destination
+ parquet_file = pa.parquet.ParquetFile(source)
+ # Beam can create empty Parquet files, so we need to pass the source Parquet file's schema
+ with ArrowWriter(schema=parquet_file.schema_arrow, path=destination, stream=stream) as writer:
+ for record_batch in parquet_file.iter_batches():
+ pa_table = pa.Table.from_batches([record_batch])
+ writer.write_table(pa_table)
+ num_examples, num_bytes = writer.finalize()
+ return num_bytes, num_examples
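+# Illustrative sketch (assumption, not upstream code; file names are made up):
+# converting a single local shard and reading back the counters:
+#
+#   >>> num_bytes, num_examples = parquet_to_arrow("shard-00000.parquet", "shard-00000.arrow")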
diff --git a/venv/lib/python3.10/site-packages/datasets/builder.bak.py b/venv/lib/python3.10/site-packages/datasets/builder.bak.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac5324b7033259aaa4a66d8cbb3315a19384480e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/builder.bak.py
@@ -0,0 +1,2300 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import Dict, Iterable, Mapping, Optional, Tuple, Union
+
+import fsspec
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from datasets import ReadInstruction
+
+from . import config, utils
+from .arrow_dataset import Dataset
+from .arrow_reader import (
+ HF_GCP_BASE_URL,
+ ArrowReader,
+ DatasetNotOnHfGcsError,
+ MissingFilesOnHfGcsError,
+)
+from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
+from .data_files import DataFilesDict, sanitize_patterns
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager, DownloadMode
+from .download.mock_download_manager import MockDownloadManager
+from .download.streaming_download_manager import StreamingDownloadManager
+from .features import Features
+from .filesystems import is_remote_filesystem
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
+from .iterable_dataset import ExamplesIterable, IterableDataset, _generate_examples_from_tables_wrapper
+from .keyhash import DuplicatedKeysError
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
+from .splits import Split, SplitDict, SplitGenerator, SplitInfo
+from .streaming import extend_dataset_builder_for_streaming
+from .utils import logging
+from .utils.file_utils import cached_path, is_remote_url
+from .utils.filelock import FileLock
+from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
+from .utils.py_utils import (
+ classproperty,
+ convert_file_size_to_int,
+ has_sufficient_disk_space,
+ iflatmap_unordered,
+ map_nested,
+ memoize,
+ size_str,
+ temporary_assignment,
+)
+from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
+
+
+logger = logging.get_logger(__name__)
+
+
+class InvalidConfigName(ValueError):
+ pass
+
+
+class DatasetBuildError(Exception):
+ pass
+
+
+class ManualDownloadError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationError(DatasetBuildError):
+ pass
+
+
+@dataclass
+class BuilderConfig:
+ """Base class for `DatasetBuilder` data configuration.
+
+ `DatasetBuilder` subclasses with data configuration options should subclass
+ `BuilderConfig` and add their own properties.
+
+ Attributes:
+ name (`str`, defaults to `default`):
+ The name of the configuration.
+ version (`Version` or `str`, *optional*):
+ The version of the configuration.
+ data_dir (`str`, *optional*):
+ Path to the directory containing the source data.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ description (`str`, *optional*):
+ A human description of the configuration.
+ """
+
+ name: str = "default"
+ version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
+ data_dir: Optional[str] = None
+ data_files: Optional[DataFilesDict] = None
+ description: Optional[str] = None
+
+ def __post_init__(self):
+ # The config name is used to name the cache directory.
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in self.name:
+ raise InvalidConfigName(
+ f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
+ f"They could create issues when creating a directory for this config on Windows filesystem."
+ )
+ if self.data_files is not None and not isinstance(self.data_files, DataFilesDict):
+ raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+ def __eq__(self, o):
+ # we need to override the default dataclass __eq__ since it doesn't check for
+ # other attributes than the ones of the signature.
+ if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+ return False
+ return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[Features] = None,
+ ) -> str:
+ """
+ The config id is used to build the cache directory.
+ By default it is equal to the config name.
+ However the name of a config is not sufficient to have a unique identifier for the dataset being generated
+ since it doesn't take into account:
+ - the config kwargs that can be used to overwrite attributes
+ - the custom features used to write the dataset
+ - the data_files for json/text/csv/pandas datasets
+
+ Therefore the config id is just the config name with an optional suffix based on these.
+ """
+ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+ suffix: Optional[str] = None
+ config_kwargs_to_add_to_suffix = config_kwargs.copy()
+ # name and version are already used to build the cache directory
+ config_kwargs_to_add_to_suffix.pop("name", None)
+ config_kwargs_to_add_to_suffix.pop("version", None)
+ # data dir handling (when specified it points to the manually downloaded data):
+ # it was previously ignored before the introduction of config id because we didn't want
+ # to change the config name. Now it's fine to take it into account for the config id.
+ # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if "data_dir" in config_kwargs_to_add_to_suffix and config_kwargs_to_add_to_suffix["data_dir"] is None:
+ config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if config_kwargs_to_add_to_suffix:
+ # we don't care about the order of the kwargs
+ config_kwargs_to_add_to_suffix = {
+ k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+ }
+ if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+ suffix = ",".join(
+ str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+ )
+ if len(suffix) > 32: # hash if too long
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+ else:
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+ if custom_features is not None:
+ m = Hasher()
+ if suffix:
+ m.update(suffix)
+ m.update(custom_features)
+ suffix = m.hexdigest()
+
+ if suffix:
+ config_id = self.name + "-" + suffix
+ if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+ config_id = self.name + "-" + Hasher.hash(suffix)
+ return config_id
+ else:
+ return self.name
+
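+# Illustrative sketch (not part of the upstream module): the config id above is the
+# config name, optionally suffixed with the extra kwargs, e.g.:
+#
+#   >>> BuilderConfig(name="default").create_config_id({}, custom_features=None)
+#   'default'
+#   >>> BuilderConfig(name="default").create_config_id({"sep": ";"}, custom_features=None)
+#   'default-sep=%3B'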
+
+class DatasetBuilder:
+ """Abstract base class for all datasets.
+
+ `DatasetBuilder` has 3 key methods:
+
+ - [`DatasetBuilder.info`]: Documents the dataset, including feature
+ names, types, shapes, version, splits, citation, etc.
+ - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+ and writes it to disk.
+ - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+ Some `DatasetBuilder`s expose multiple variants of the
+ dataset by defining a [`BuilderConfig`] subclass and accepting a
+ config object (or name) on construction. Configurable datasets expose a
+ pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+ Args:
+ cache_dir (`str`, *optional*):
+ Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+ config_name (`str`, *optional*):
+ Name of the dataset configuration.
+ It affects the data generated on disk. Different configurations will have their own subdirectories and
+ versions.
+ If not provided, the default configuration is used (if it exists).
+
+ Parameter `name` was renamed to `config_name` in version 2.3.0.
+
+ hash (`str`, *optional*):
+ Hash specific to the dataset code. Used to update the caching directory when the
+ dataset loading script code is updated (to avoid reusing old data).
+ The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files.
+ This can be a remote URL.
+ features ([`Features`], *optional*):
+ Features types to use with this dataset.
+ It can be used to change the [`Features`] types of a dataset, for example.
+ use_auth_token (`str` or `bool`, *optional*):
+ String or boolean to use as Bearer token for remote files on the
+ Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+ repo_id (`str`, *optional*):
+ ID of the dataset repository.
+ Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+ and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ For builders like "csv" or "json" that need the user to specify data files. They can be either
+ local or remote files. For convenience, you can use a `DataFilesDict`.
+ data_dir (`str`, *optional*):
+ Path to directory containing source data file(s).
+ Use only if `data_files` is not passed, in which case it is equivalent to passing
+ `os.path.join(data_dir, "**")` as `data_files`.
+ For builders that require manual download, it must be the path to the local directory containing the
+ manually downloaded data.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ writer_batch_size (`int`, *optional*):
+ Batch size used by the ArrowWriter.
+ It defines the number of samples that are kept in memory before writing them
+ and also the length of the arrow chunks.
+ None means that the ArrowWriter will use its default value.
+ name (`str`): Configuration name for the dataset.
+
+ Deprecated in version 2.3.0: use `config_name` instead.
+
+ **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+ configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+ configuration class is [`BuilderConfig`] or a subclass of it.
+ """
+
+ # Default version
+ VERSION = None # Default version set in BuilderConfig
+
+ # Class for the builder config.
+ BUILDER_CONFIG_CLASS = BuilderConfig
+
+ # Named configurations that modify the data generated by download_and_prepare.
+ BUILDER_CONFIGS = []
+
+ # Optional default config name to be used when name is None
+ DEFAULT_CONFIG_NAME = None
+
+ # Default batch size used by the ArrowWriter
+ # It defines the number of samples that are kept in memory before writing them
+ # and also the length of the arrow chunks
+ # None means that the ArrowWriter will use its default value
+ DEFAULT_WRITER_BATCH_SIZE = None
+
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ config_name: Optional[str] = None,
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[DatasetInfo] = None,
+ features: Optional[Features] = None,
+ use_auth_token: Optional[Union[bool, str]] = None,
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ # DatasetBuilder name
+ self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
+ self.hash: Optional[str] = hash
+ self.base_path = base_path
+ self.use_auth_token = use_auth_token
+ self.repo_id = repo_id
+ self.storage_options = storage_options
+ self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+ if data_files is not None and not isinstance(data_files, DataFilesDict):
+ data_files = DataFilesDict.from_local_or_remote(
+ sanitize_patterns(data_files), base_path=base_path, use_auth_token=use_auth_token
+ )
+
+ # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
+ if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+ config_kwargs["features"] = features
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ self.config, self.config_id = self._create_builder_config(
+ config_name=config_name,
+ custom_features=features,
+ **config_kwargs,
+ )
+
+ # prepare info: DatasetInfo are a standardized dataclass across all datasets
+ # Prefill datasetinfo
+ if info is None:
+ info = self.get_exported_dataset_info()
+ info.update(self._info())
+ info.builder_name = self.name
+ info.config_name = self.config.name
+ info.version = self.config.version
+ self.info = info
+ # update info with user specified infos
+ if features is not None:
+ self.info.features = features
+
+ # Prepare data dirs:
+ # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+ self._cache_dir_root = (
+ self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+ )
+ path_join = posixpath.join if is_remote_url(self._cache_dir_root) else os.path.join
+ self._cache_downloaded_dir = (
+ path_join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+ if cache_dir
+ else str(config.DOWNLOADED_DATASETS_PATH)
+ )
+ self._cache_downloaded_dir = (
+ self._cache_downloaded_dir
+ if is_remote_url(self._cache_downloaded_dir)
+ else os.path.expanduser(self._cache_downloaded_dir)
+ )
+ self._cache_dir = self._build_cache_dir()
+ if not is_remote_url(self._cache_dir_root):
+ os.makedirs(self._cache_dir_root, exist_ok=True)
+ lock_path = os.path.join(self._cache_dir_root, self._cache_dir.replace(os.sep, "_") + ".lock")
+ with FileLock(lock_path):
+ if os.path.exists(self._cache_dir): # check if data exist
+ if len(os.listdir(self._cache_dir)) > 0:
+ if os.path.exists(path_join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+ logger.info("Overwrite dataset info from restored data version if it exists.")
+ self.info = DatasetInfo.from_directory(self._cache_dir)
+ else: # dir exists but no data, remove the empty dir as data aren't available anymore
+ logger.warning(
+ f"Old caching folder {self._cache_dir} for dataset {self.name} exists but no data were found. Removing it. "
+ )
+ os.rmdir(self._cache_dir)
+
+ # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
+ self._output_dir = self._cache_dir
+ self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
+
+ # Set download manager
+ self.dl_manager = None
+
+ # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
+ self._record_infos = False
+
+ # Enable streaming (e.g. it patches "open" to work with remote files)
+ extend_dataset_builder_for_streaming(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-enable streaming, since patched functions are not kept when pickling
+ extend_dataset_builder_for_streaming(self)
+
+ # Must be set for datasets that use 'data_dir' functionality - the ones
+ # that require users to do additional steps to download the data
+ # (this is usually due to some external regulations / rules).
+ # This field should contain a string with user instructions, including
+ # the list of files that should be present. It will be
+ # displayed in the dataset documentation.
+ @property
+ def manual_download_instructions(self) -> Optional[str]:
+ return None
+
+ @classmethod
+ def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
+ """Empty dict if it doesn't exist.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_all_exported_dataset_infos()
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
+ ```
+ """
+ return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
+
+ def get_exported_dataset_info(self) -> DatasetInfo:
+ """Empty `DatasetInfo` if it doesn't exist.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_exported_dataset_info()
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
+ ```
+ """
+ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
+
+ def _create_builder_config(
+ self, config_name=None, custom_features=None, **config_kwargs
+ ) -> Tuple[BuilderConfig, str]:
+ """Create and validate BuilderConfig object as well as a unique config id for this config.
+ Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
+ config_kwargs override the default kwargs in the config
+ """
+ builder_config = None
+
+ # try default config
+ if config_name is None and self.BUILDER_CONFIGS and not config_kwargs:
+ if self.DEFAULT_CONFIG_NAME is not None:
+ builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
+ logger.warning(f"No config specified, defaulting to: {self.name}/{builder_config.name}")
+ else:
+ if len(self.BUILDER_CONFIGS) > 1:
+ example_of_usage = f"load_dataset('{self.name}', '{self.BUILDER_CONFIGS[0].name}')"
+ raise ValueError(
+ "Config name is missing."
+ f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ + f"\nExample of usage:\n\t`{example_of_usage}`"
+ )
+ builder_config = self.BUILDER_CONFIGS[0]
+ logger.info(f"No config specified, defaulting to the single config: {self.name}/{builder_config.name}")
+
+ # try to get config by name
+ if isinstance(config_name, str):
+ builder_config = self.builder_configs.get(config_name)
+ if builder_config is None and self.BUILDER_CONFIGS:
+ raise ValueError(
+ f"BuilderConfig {config_name} not found. Available: {list(self.builder_configs.keys())}"
+ )
+
+ # if not using an existing config, then create a new config on the fly
+ if not builder_config:
+ if config_name is not None:
+ config_kwargs["name"] = config_name
+ elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+ # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+ config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+ if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+ config_kwargs["version"] = self.VERSION
+ builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+ # otherwise use the config_kwargs to overwrite the attributes
+ else:
+ builder_config = copy.deepcopy(builder_config)
+ for key, value in config_kwargs.items():
+ if value is not None:
+ if not hasattr(builder_config, key):
+ raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+ setattr(builder_config, key, value)
+
+ if not builder_config.name:
+ raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+ # compute the config id that is going to be used for caching
+ config_id = builder_config.create_config_id(
+ config_kwargs,
+ custom_features=custom_features,
+ )
+ is_custom = (config_id not in self.builder_configs) and config_id != "default"
+ if is_custom:
+ logger.info(f"Using custom data configuration {config_id}")
+ else:
+ if (
+ builder_config.name in self.builder_configs
+ and builder_config != self.builder_configs[builder_config.name]
+ ):
+ raise ValueError(
+ "Cannot name a custom BuilderConfig the same as an available "
+ f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+ )
+ if not builder_config.version:
+ raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+ # if not builder_config.description:
+ # raise ValueError(f"BuilderConfig {builder_config.name} must have a description" )
+
+ return builder_config, config_id
+
+ @classproperty
+ @classmethod
+ @memoize()
+ def builder_configs(cls):
+ """Pre-defined list of configurations for this builder class."""
+ configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+ if len(configs) != len(cls.BUILDER_CONFIGS):
+ names = [config.name for config in cls.BUILDER_CONFIGS]
+ raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+ return configs
+
+ @property
+ def cache_dir(self):
+ return self._cache_dir
+
+ def _relative_data_dir(self, with_version=True, with_hash=True, is_local=True) -> str:
+ """Relative path of this dataset in cache_dir:
+ Will be:
+ self.name/self.config.version/self.hash/
+ or if a repo_id with a namespace has been specified:
+ self.namespace___self.name/self.config.version/self.hash/
+ If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ builder_data_dir = self.name if namespace is None else f"{namespace}___{self.name}"
+ builder_config = self.config
+ hash = self.hash
+ path_join = os.path.join if is_local else posixpath.join
+ if builder_config:
+ # use the enriched name instead of the name to make it unique
+ builder_data_dir = path_join(builder_data_dir, self.config_id)
+ if with_version:
+ builder_data_dir = path_join(builder_data_dir, str(self.config.version))
+ if with_hash and hash and isinstance(hash, str):
+ builder_data_dir = path_join(builder_data_dir, hash)
+ return builder_data_dir
+
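+ # Illustrative sketch (assumption, not upstream code): for a builder named "squad"
+ # loaded from the "lhoestq/squad" repo with config id "plain_text", version "1.0.0"
+ # and hash "abc123", the method above resolves to
+ # "lhoestq___squad/plain_text/1.0.0/abc123".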
+ def _build_cache_dir(self):
+ """Return the data directory for the current version."""
+ is_local = not is_remote_url(self._cache_dir_root)
+ path_join = os.path.join if is_local else posixpath.join
+ builder_data_dir = path_join(
+ self._cache_dir_root, self._relative_data_dir(with_version=False, is_local=is_local)
+ )
+ version_data_dir = path_join(
+ self._cache_dir_root, self._relative_data_dir(with_version=True, is_local=is_local)
+ )
+
+ def _other_versions_on_disk():
+ """Returns previous versions on disk."""
+ if not os.path.exists(builder_data_dir):
+ return []
+
+ version_dirnames = []
+ for dir_name in os.listdir(builder_data_dir):
+ try:
+ version_dirnames.append((utils.Version(dir_name), dir_name))
+ except ValueError: # Invalid version (ex: incomplete data dir)
+ pass
+ version_dirnames.sort(reverse=True)
+ return version_dirnames
+
+ # Check and warn if other versions exist
+ if not is_remote_url(builder_data_dir):
+ version_dirs = _other_versions_on_disk()
+ if version_dirs:
+ other_version = version_dirs[0][0]
+ if other_version != self.config.version:
+ warn_msg = (
+ f"Found a different version {str(other_version)} of dataset {self.name} in "
+ f"cache_dir {self._cache_dir_root}. Using currently defined version "
+ f"{str(self.config.version)}."
+ )
+ logger.warning(warn_msg)
+
+ return version_data_dir
+
+ @abc.abstractmethod
+ def _info(self) -> DatasetInfo:
+ """Construct the DatasetInfo object. See `DatasetInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (DatasetInfo) The dataset information
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_imported_module_dir(cls):
+ """Return the path of the module of this class or subclass."""
+ return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
+
+ def _rename(self, src: str, dst: str):
+ is_local = not is_remote_filesystem(self._fs)
+ if is_local:
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
+ shutil.move(self._fs._strip_protocol(src), self._fs._strip_protocol(dst))
+ else:
+ self._fs.mv(src, dst, recursive=True)
+
+ def download_and_prepare(
+ self,
+ output_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ try_from_hf_gcs: bool = True,
+ dl_manager: Optional[DownloadManager] = None,
+ base_path: Optional[str] = None,
+ use_auth_token="deprecated",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **download_and_prepare_kwargs,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ output_dir (`str`, *optional*):
+ Output directory for the dataset.
+ Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+
+
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+ Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+ dl_manager (`DownloadManager`, *optional*):
+ Specific `DownloadManger` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote url.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from ~/.huggingface.
+
+ Deprecated in version 2.7.1: pass `use_auth_token` to the initializer/`load_dataset_builder` instead.
+
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Default to "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+
+
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+
+
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
+
+ Example:
+
+ Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> ds = builder.download_and_prepare()
+ ```
+
+ Download and prepare the dataset as sharded Parquet files locally:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> ds = builder.download_and_prepare("./output_dir", file_format="parquet")
+ ```
+
+ Download and prepare the dataset as sharded Parquet files in a cloud storage:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> ds = builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `use_auth_token` to the initializer/`load_dataset_builder` instead.",
+ FutureWarning,
+ )
+ else:
+ use_auth_token = self.use_auth_token
+
+ output_dir = output_dir if output_dir is not None else self._cache_dir
+ # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ fs_token_paths = fsspec.get_fs_token_paths(output_dir, storage_options=storage_options)
+ self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
+ is_local = not is_remote_filesystem(self._fs)
+ self._output_dir = fs_token_paths[2][0] if is_local else self._fs.unstrip_protocol(fs_token_paths[2][0])
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+ base_path = base_path if base_path is not None else self.base_path
+
+ if file_format is not None and file_format not in ["arrow", "parquet"]:
+ raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
+
+ if self._fs._strip_protocol(self._output_dir) == "":
+ # We don't support the root directory, because it has no dirname,
+ # and we need a dirname to use a .incomplete directory
+ # when the dataset is being written
+ raise RuntimeError(
+ f"Unable to download and prepare the dataset at the root {self._output_dir}. "
+ f"Please specify a subdirectory, e.g. '{self._output_dir + self.name}'"
+ )
+
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig(
+ cache_dir=self._cache_downloaded_dir,
+ force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ use_etag=False,
+ num_proc=num_proc,
+ use_auth_token=use_auth_token,
+ storage_options=self.storage_options,
+ ) # We don't use etag for data files to speed up the process
+
+ dl_manager = DownloadManager(
+ dataset_name=self.name,
+ download_config=download_config,
+ data_dir=self.config.data_dir,
+ base_path=base_path,
+ record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
+ )
+
+ if (
+ isinstance(dl_manager, MockDownloadManager)
+ or not is_local
+ or file_format != "arrow"
+ or max_shard_size is not None
+ ):
+ try_from_hf_gcs = False
+ self.dl_manager = dl_manager
+
+ # Prevent parallel local disk operations
+ if is_local:
+ # Create parent directory of the output_dir to put the lock file in there
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ lock_path = self._output_dir + "_builder.lock"
+
+ # File locking only with local paths; no file locking on GCS or S3
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ # Check if the data already exists
+ path_join = os.path.join if is_local else posixpath.join
+ data_exists = self._fs.exists(path_join(self._output_dir, config.DATASET_INFO_FILENAME))
+ if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
+ logger.warning(f"Found cached dataset {self.name} ({self._output_dir})")
+ # We need to update the info in case some splits were added in the meantime
+ # for example when calling load_dataset from multiple workers.
+ self.info = self._load_info()
+ self.download_post_processing_resources(dl_manager)
+ return
+
+ logger.info(f"Generating dataset {self.name} ({self._output_dir})")
+ if is_local: # if cache dir is local, check for available space
+ if not has_sufficient_disk_space(
+ self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
+ ):
+ raise OSError(
+ f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
+ )
+
+ @contextlib.contextmanager
+ def incomplete_dir(dirname):
+ """Create temporary dir for dirname and rename on exit."""
+ if not is_local:
+ self._fs.makedirs(dirname, exist_ok=True)
+ yield dirname
+ else:
+ tmp_dir = dirname + ".incomplete"
+ os.makedirs(tmp_dir, exist_ok=True)
+ try:
+ yield tmp_dir
+ if os.path.isdir(dirname):
+ shutil.rmtree(dirname)
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
+ shutil.move(tmp_dir, dirname)
+ finally:
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
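+            # Note: on local filesystems the dataset is staged in "<output_dir>.incomplete" and only
+            # moved to its final location once generation succeeds, so a partially written cache
+            # directory is never left behind after a failure.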
+
+ # Print is intentional: we want this to always go to stdout so user has
+ # information needed to cancel download/preparation if needed.
+ # This comes right before the progress bar.
+ if self.info.size_in_bytes:
+ print(
+ f"Downloading and preparing dataset {self.info.builder_name}/{self.info.config_name} "
+ f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
+ f"post-processed: {size_str(self.info.post_processing_size)}, "
+ f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
+ )
+ else:
+ _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
+ print(
+ f"Downloading and preparing dataset {self.info.builder_name}/{self.info.config_name} to {_dest}..."
+ )
+
+ self._check_manual_download(dl_manager)
+
+ # Create a tmp dir and rename to self._output_dir on successful exit.
+ with incomplete_dir(self._output_dir) as tmp_output_dir:
+                # Temporarily assign _output_dir to tmp_output_dir to avoid having to forward
+ # it to every sub function.
+ with temporary_assignment(self, "_output_dir", tmp_output_dir):
+ # Try to download the already prepared dataset files
+ downloaded_from_gcs = False
+ if try_from_hf_gcs:
+ try:
+ self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ downloaded_from_gcs = True
+ except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
+ logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
+ except ConnectionError:
+ logger.warning("HF google storage unreachable. Downloading and preparing it from source")
+ if not downloaded_from_gcs:
+ prepare_split_kwargs = {"file_format": file_format}
+ if max_shard_size is not None:
+ prepare_split_kwargs["max_shard_size"] = max_shard_size
+ if num_proc is not None:
+ prepare_split_kwargs["num_proc"] = num_proc
+ self._download_and_prepare(
+ dl_manager=dl_manager,
+ verification_mode=verification_mode,
+ **prepare_split_kwargs,
+ **download_and_prepare_kwargs,
+ )
+ # Sync info
+ self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
+ self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
+ self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
+ # Save info
+ self._save_info()
+
+ # Download post processing resources
+ self.download_post_processing_resources(dl_manager)
+
+ print(
+ f"Dataset {self.name} downloaded and prepared to {self._output_dir}. "
+ f"Subsequent calls will reuse this data."
+ )
+
+ def _check_manual_download(self, dl_manager):
+ if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
+ raise ManualDownloadError(
+ textwrap.dedent(
+ f"""\
+ The dataset {self.name} with config {self.config.name} requires manual data.
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+                     datasets.load_dataset("{self.name}", data_dir="<path/to/manual/data>")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+                    logger.info(f"Couldn't download resource file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+        This is the internal implementation to override; it is called when the user calls
+ `download_and_prepare`. It should download all required data and generate
+ the pre-processed datasets files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+ "`all` is a special split keyword corresponding to the "
+ "union of all splits, so cannot be used as key in "
+                    "._split_generators()."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated to the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+            # If check_duplicates is set to True, re-raise DuplicatedKeysError with a fix message
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+                if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ is_local = not is_remote_filesystem(self._fs)
+ if is_local:
+ lock_path = self._output_dir + "_info.lock"
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ is_local = not is_remote_filesystem(self._fs)
+ if is_local:
+ lock_path = self._output_dir + "_infos.lock"
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+                <Added version="2.9.1"/>
+            ignore_verifications (`bool`, defaults to `False`):
+                Whether to ignore the verifications of the
+                downloaded/processed dataset information (checksums/size/splits/...).
+
+                <Deprecated version="2.9.1">
+
+                `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+                Please use `verification_mode` instead.
+
+                </Deprecated>
+
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ datasets.Dataset
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+        >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+            verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+                "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ is_local = not is_remote_filesystem(self._fs)
+ if not is_local:
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=not logging.is_progress_bar_enabled(),
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
+ if self.info.post_processed is None:
+ self.info.post_processed = PostProcessedInfo()
+ if self.info.post_processed.resources_checksums is None:
+ self.info.post_processed.resources_checksums = {}
+ self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
+ self.info.post_processing_size = sum(
+ checksums_dict["num_bytes"]
+ for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
+ for checksums_dict in split_checksums_dicts.values()
+ )
+ if self.info.dataset_size is not None and self.info.download_size is not None:
+ self.info.size_in_bytes = (
+ self.info.dataset_size + self.info.download_size + self.info.post_processing_size
+ )
+ self._save_info()
+ ds._info.post_processed = self.info.post_processed
+ ds._info.post_processing_size = self.info.post_processing_size
+ ds._info.size_in_bytes = self.info.size_in_bytes
+ if self.info.post_processed.features is not None:
+ if self.info.post_processed.features.type != ds.features.type:
+ raise ValueError(
+ f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
+ )
+ else:
+ ds.info.features = self.info.post_processed.features
+
+ return ds
+
+ def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
+ """Constructs a `Dataset`.
+
+        This is the internal implementation to override; it is called when the user calls
+ `as_dataset`. It should read the pre-processed datasets files and generate
+ the `Dataset` object.
+
+ Args:
+ split (`datasets.Split`):
+ which subset of the data to read.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset`
+ """
+ cache_dir = self._fs._strip_protocol(self._output_dir)
+ dataset_kwargs = ArrowReader(cache_dir, self.info).read(
+ name=self.name,
+ instructions=split,
+ split_infos=self.info.splits.values(),
+ in_memory=in_memory,
+ )
+ fingerprint = self._get_dataset_fingerprint(split)
+ return Dataset(fingerprint=fingerprint, **dataset_kwargs)
+
+ def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
+ """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
+ hasher = Hasher()
+ hasher.update(self._relative_data_dir().replace(os.sep, "/"))
+ hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
+ fingerprint = hasher.hexdigest()
+ return fingerprint
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ # if not isinstance(self, (GeneratorBasedBuilder, ArrowBasedBuilder)):
+ # raise ValueError(f"Builder {self.name} is not streamable.")
+
+ is_local = not is_remote_filesystem(self._fs)
+ if not is_local:
+ raise NotImplementedError(
+ f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
+ )
+
+ dl_manager = StreamingDownloadManager(
+ base_path=base_path or self.base_path,
+ download_config=DownloadConfig(use_auth_token=self.use_auth_token, storage_options=self.storage_options),
+ dataset_name=self.name,
+ data_dir=self.config.data_dir,
+ )
+ self._check_manual_download(dl_manager)
+ splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
+ # By default, return all splits
+ if split is None:
+ splits_generator = splits_generators
+ elif split in splits_generators:
+ splits_generator = splits_generators[split]
+ else:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ self._as_streaming_dataset_single,
+ splits_generator,
+ map_tuple=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _as_streaming_dataset_single(
+ self,
+ splits_generator,
+ ) -> IterableDataset:
+ ex_iterable = self._get_examples_iterable_for_split(splits_generator)
+ # add auth to be able to access and decode audio/image files from private repositories.
+ token_per_repo_id = {self.repo_id: self.use_auth_token} if self.repo_id else {}
+ return IterableDataset(
+ ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ )
+
+ def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
+ """Run dataset transforms or add indexes"""
+ return None
+
+ def _post_processing_resources(self, split: str) -> Dict[str, str]:
+ """Mapping resource_name -> resource_file_name"""
+ return {}
+
+ def _download_post_processing_resources(
+ self, split: str, resource_name: str, dl_manager: DownloadManager
+ ) -> Optional[str]:
+ """Download the resource using the download manager and return the downloaded path."""
+ return None
+
+ @abc.abstractmethod
+ def _split_generators(self, dl_manager: DownloadManager):
+ """Specify feature dictionary generators and dataset splits.
+
+ This function returns a list of `SplitGenerator`s defining how to generate
+ data and what splits to use.
+
+ Example:
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={'file': 'train_data.zip'},
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={'file': 'test_data.zip'},
+ ),
+ ]
+
+ The above code will first call `_generate_examples(file='train_data.zip')`
+ to write the train data, then `_generate_examples(file='test_data.zip')` to
+ write the test data.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ Note that for datasets without a `VALIDATION` split, you can use a
+ fraction of the `TRAIN` data for evaluation as you iterate on your model
+ so as not to overfit to the `TEST` data.
+
+ For downloads and extractions, use the given `download_manager`.
+ Note that the `DownloadManager` caches downloads, so it is fine to have each
+ generator attempt to download the source data.
+
+ A good practice is to download all data in this function, and then
+ distribute the relevant parts to each split with the `gen_kwargs` argument
+
+ Args:
+ dl_manager (`DownloadManager`):
+ Download manager to download the data
+
+ Returns:
+ `list`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+ format of the data files in which the dataset will be written.
+                Supported formats: "arrow", "parquet". Defaults to "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+                <Added version="2.7.0"/>
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+ dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+ same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
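+
+        Example:
+
+        A minimal sketch of a hypothetical implementation (the `filepath` kwarg and the
+        `"text"` field are illustrative, not part of the library API):
+
+        ```py
+        def _generate_examples(self, filepath):
+            with open(filepath, encoding="utf-8") as f:
+                for line_id, line in enumerate(f):
+                    # the line number is a unique, deterministic key
+                    yield line_id, {"text": line.strip()}
+        ```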
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ check_duplicate_keys: bool,
+ file_format="arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[int, str]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ is_local = not is_remote_filesystem(self._fs)
+ path_join = os.path.join if is_local else posixpath.join
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = path_join(self._output_dir, fname)
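+        # e.g. "my_dataset-train-JJJJJ-SSSSS-of-NNNNN.arrow", where JJJJJ is the job id, SSSSS the
+        # shard id within the job and NNNNN the total number of shards; the placeholders are filled
+        # in (or the whole suffix dropped for a single shard) when the shards are renamed at the end.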
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1 and num_proc is not None:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_proc is not None and num_input_shards < num_proc:
+ logger.info(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = logging.tqdm(
+ disable=not logging.is_progress_bar_enabled(),
+ unit=" examples",
+ total=split_info.num_examples,
+ leave=False,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ "split_info": split_info,
+ "check_duplicate_keys": check_duplicate_keys,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+ def _rename_shard(shard_and_job: Tuple[int]):
+ shard_id, job_id = shard_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shards_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self,
+ gen_kwargs: dict,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ split_info: SplitInfo,
+ check_duplicate_keys: bool,
+ job_id: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
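+        # Yields (job_id, False, num_examples_written_since_last_update) progress tuples while
+        # writing, then a final (job_id, True, (total_examples, total_bytes, features, num_shards,
+        # shard_lengths)) tuple once this job has finished writing its shards.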
+ generator = self._generate_examples(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for key, record in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ example = self.info.features.encode_example(record) if self.info.features is not None else record
+ writer.write(example, key)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ super()._download_and_prepare(
+ dl_manager,
+ verification_mode,
+ check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
+ or verification_mode == VerificationMode.ALL_CHECKS,
+ **prepare_splits_kwargs,
+ )
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
+
+
+class ArrowBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
+
+ @abc.abstractmethod
+ def _generate_tables(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+ dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+ same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `pyarrow.Table`, a feature table
+ ready to be encoded and written to disk.
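+
+        Example:
+
+        A minimal sketch of a hypothetical implementation (the CSV input is illustrative):
+
+        ```py
+        import pyarrow.csv as pac
+
+        def _generate_tables(self, files):
+            for file_idx, file in enumerate(files):
+                # one pyarrow.Table per input file; keys only need to be unique and deterministic
+                yield file_idx, pac.read_csv(file)
+        ```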
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[str, int]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ is_local = not is_remote_filesystem(self._fs)
+ path_join = os.path.join if is_local else posixpath.join
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = path_join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1 and num_proc is not None:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_proc is not None and num_input_shards < num_proc:
+ logger.info(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = logging.tqdm(
+ disable=not logging.is_progress_bar_enabled(),
+ unit=" examples",
+ total=split_info.num_examples,
+ leave=False,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+ def _rename_shard(shard_id_and_job: Tuple[int]):
+ shard_id, job_id = shard_id_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shard_ids_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
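+        # Same yield protocol as GeneratorBasedBuilder._prepare_split_single: progress tuples
+        # (job_id, False, num_examples) while writing, then a final (job_id, True, result) tuple.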
+ generator = self._generate_tables(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for _, table in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ writer.write_table(table)
+ num_examples_progress_update += len(table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(
+ _generate_examples_from_tables_wrapper(self._generate_tables), kwargs=split_generator.gen_kwargs
+ )
+
+
+class MissingBeamOptions(ValueError):
+ pass
+
+
+class BeamBasedBuilder(DatasetBuilder):
+ """Beam-based Builder."""
+
+ def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
+ self._beam_runner = beam_runner
+ self._beam_options = beam_options
+ self._beam_writers = {} # {split: beam_writer} mapping.
+ super().__init__(*args, **kwargs)
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
+ # it's in the call signature of `_split_generators()`.
+ # This allows for global preprocessing in beam.
+ split_generators_kwargs = {}
+ split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
+ if "pipeline" in split_generators_arg_names:
+ split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
+ return split_generators_kwargs
+
+ @abc.abstractmethod
+ def _build_pcollection(self, pipeline, **kwargs):
+ """Build the beam pipeline examples for each `SplitGenerator`.
+
+ This function extracts examples from the raw data with parallel transforms
+ in a Beam pipeline. It is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples from the PCollection will be
+ encoded and written to disk.
+
+
+ Warning: When running in a distributed setup, make sure that the data
+ which will be read (download_dir, manual_dir,...) and written (cache_dir)
+ can be accessed by the workers jobs. The data should be located in a
+ shared filesystem, like GCS.
+
+
+ Args:
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam pipeline.
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs.
+
+ Returns:
+ `beam.PCollection`: Apache Beam PCollection containing the
+ example to send to `self.info.features.encode_example(...)`.
+
+ Example:
+
+ ```
+ def _build_pcollection(pipeline, extracted_dir=None):
+ return (
+ pipeline
+ | beam.Create(gfile.io.listdir(extracted_dir))
+ | beam.Map(_process_file)
+ )
+ ```
+ """
+ raise NotImplementedError()
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ # Create the Beam pipeline and forward it to `_prepare_split`
+ import apache_beam as beam
+
+ import datasets.utils.beam_utils as beam_utils
+
+ beam_runner = self._beam_runner
+ beam_options = self._beam_options
+
+ if not beam_runner and not beam_options:
+ usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
+ raise MissingBeamOptions(
+ "Trying to generate a dataset using Apache Beam, yet no Beam Runner "
+ "or PipelineOptions() has been provided in `load_dataset` or in the "
+ "builder arguments. For big datasets it has to run on large-scale data "
+ "processing tools like Dataflow, Spark, etc. More information about "
+ "Apache Beam runners at "
+ "https://beam.apache.org/documentation/runners/capability-matrix/"
+ "\nIf you really want to run it locally because you feel like the "
+ "Dataset is small enough, you can use the local beam runner called "
+ "`DirectRunner` (you may run out of memory). \nExample of usage: "
+ f"\n\t`{usage_example}`"
+ )
+ if self._writer_batch_size is not None:
+ logger.warning(
+ "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
+ )
+
+        # Beam type checking assumes a transform's multiple outputs are of the same type,
+        # which is not our case. Plus it doesn't handle all types correctly, so we
+        # are better off without it.
+ pipeline_options = {"pipeline_type_check": False}
+ if "num_proc" in prepare_splits_kwargs:
+ num_workers = prepare_splits_kwargs.pop("num_proc")
+ pipeline_options["direct_num_workers"] = num_workers
+ pipeline_options["num_workers"] = num_workers
+ pipeline_options["direct_running_mode"] = "multi_processing"
+ # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
+            raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing is not supported yet.")
+ beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
+ # Use a single pipeline for all splits
+ pipeline = beam_utils.BeamPipeline(
+ runner=beam_runner,
+ options=beam_options,
+ )
+ super()._download_and_prepare(
+ dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
+ ) # TODO handle verification_mode in beam datasets
+ # Run pipeline
+ pipeline_results = pipeline.run()
+ pipeline_results.wait_until_finish()
+ metrics = pipeline_results.metrics()
+ # Update `info.splits`.
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.name}-{split_name}.{file_format}"
+ path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
+ src_fpath = path_join(self._output_dir, src_fname)
+ dst_fpath = path_join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ import apache_beam as beam
+
+ fs = beam.io.filesystems.FileSystems
+ path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
+ with fs.create(path_join(self._output_dir, config.DATASET_INFO_FILENAME)) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with fs.create(path_join(self._output_dir, config.LICENSE_FILENAME)) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+                "max_shard_size is not supported for Beam datasets. "
+ "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples in filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.name}-{split_name}.{file_format}"
+ path_join = os.path.join if not is_remote_filesystem(self._fs) else posixpath.join
+ fpath = path_join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which build a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+        _ = pipeline | split_name >> _build_pcollection()  # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset_original(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ # From: .download_and_prepare
+ # self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ # Get info to get splits, instead of using _prepare_splits
+ try:
+ remote_dataset_info = f"{remote_cache_dir}/dataset_info.json"
+ from .download.streaming_download_manager import xopen
+
+ with xopen(remote_dataset_info) as f:
+ import json
+
+ _info = json.load(f)
+ if self.info is not None:
+ self.info.update(DatasetInfo.from_dict(_info))
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+
+ # Better: if self.info?
+ try:
+ # From: ArrowReader.download_from_hf_gcs
+ for split in self.info.splits:
+ # file_instructions = ArrowReader.get_file_instructions(
+ # name=self.info.builder_name,
+ # instruction=split,
+ # split_infos=self.info.splits.values(),
+ # ) # TODO: needed? I only need: f"{name}-{split}.arrow"
+ file_instructions = [
+ {"filename": f"{self.name}-{split}.arrow"}
+ ] # TODO: self.name OR self.info.builder_name
+ for file_instruction in file_instructions:
+ remote_prepared_filename = os.path.join(remote_cache_dir, file_instruction["filename"])
+ # downloaded_prepared_filename = cached_path(
+ # remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
+ # )
+ # shutil.move(downloaded_prepared_filename, os.path.join(self._path, file_instruction["filename"]))
+ # TODO: iterate over the table
+ import pyarrow as pa
+
+ with xopen(remote_prepared_filename, "rb") as f:
+ with pa.ipc.open_stream(f) as reader: # open_file
+ for record_batch in reader:
+ yield from record_batch.to_pylist()
+ # import pdb;pdb.set_trace()
+ # try:
+ # yield from record_batch.to_pylist()
+ # except AttributeError:
+ # import pdb;pdb.set_trace()
+ # for offset in range(record_batch.num_rows):
+ # yield record_batch.slice(offset=offset, length=1).to_pydict()
+ # TODO: this is WRONG!
+ # return IterableDataset(
+ # _iterate(remote_prepared_filename),
+ # info=self.info, # split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ # )
+
+ except FileNotFoundError as err:
+ raise MissingFilesOnHfGcsError(err) from None
+
+ def as_streaming_dataset_bak(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ split_generators = self._create_split_generators_from_prepared()
+ # yield from split_generators["train"]
+ # yield from IterableDataset(split_generators["train"], info=self.info, split="train")
+ datasets = {
+ split_name: IterableDataset(ExamplesIterable(split_generator, {}), info=self.info, split=split_name)
+ for split_name, split_generator in split_generators.items()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ # def _create_split_generators_from_prepared(self):
+ # # From: .download_and_prepare
+ # # self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ # relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ # remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ # # Get info to get splits, instead of using _prepare_splits
+ # try:
+ # remote_dataset_info = f"{remote_cache_dir}/dataset_info.json"
+ # from .download.streaming_download_manager import xopen
+ # with xopen(remote_dataset_info) as f:
+ # import json
+ # _info = json.load(f)
+ # if self.info is not None:
+ # self.info.update(DatasetInfo.from_dict(_info))
+ # except FileNotFoundError as err:
+ # raise DatasetNotOnHfGcsError(err) from None
+ #
+ # return {split: _generate_examples_from_arrow(remote_cache_dir + "/" + f"{self.name}-{split}.arrow") for split in self.info.splits}
+
+ # def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ # return ExamplesIterable(
+ # _generate_examples_from_tables_wrapper(self._generate_tables), kwargs=split_generator.gen_kwargs
+ # )
+ # def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ # return ExamplesIterable(
+ # _generate_examples_from_arrow(self._generate_tables), kwargs=split_generator.gen_kwargs
+ # )
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ datasets = {
+ split: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split)
+ for split in self.info.splits
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _get_examples_iterable_for_split(self, split): # , split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split}) # , split_generator.gen_kwargs)
+
+ def _generate_examples_from_hf_gcs(self, split): # , split_name):
+ import pyarrow as pa
+
+ from .download.streaming_download_manager import xopen
+
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ remote_prepared_filename = remote_cache_dir + "/" + f"{self.name}-{split}.arrow"
+ with xopen(remote_prepared_filename, "rb") as f:
+ with pa.ipc.open_stream(f) as reader: # open_file
+ key = 0
+ for record_batch in reader:
+ # yield from record_batch.to_pylist()
+ for record in record_batch.to_pylist():
+ yield key, record
+ key += 1
+
+
+# def _generate_examples_from_arrow(path):
+# import pyarrow as pa
+# from .download.streaming_download_manager import xopen
+# with xopen(path, "rb") as f:
+# with pa.ipc.open_stream(f) as reader: # open_file
+# key = 0
+# for record_batch in reader:
+# # yield from record_batch.to_pylist()
+# for record in record_batch.to_pylist():
+# yield key, record
+# key += 1
diff --git a/venv/lib/python3.10/site-packages/datasets/builder.py b/venv/lib/python3.10/site-packages/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..85f7d88b7dbf7bc01589b70fe8c539224e6700cf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/builder.py
@@ -0,0 +1,2293 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, Union
+from unittest.mock import patch
+
+import fsspec
+import pyarrow as pa
+from fsspec.core import url_to_fs
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config, utils
+from .arrow_dataset import Dataset
+from .arrow_reader import (
+ HF_GCP_BASE_URL,
+ ArrowReader,
+ DatasetNotOnHfGcsError,
+ MissingFilesOnHfGcsError,
+ ReadInstruction,
+)
+from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
+from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager, DownloadMode
+from .download.mock_download_manager import MockDownloadManager
+from .download.streaming_download_manager import StreamingDownloadManager, xjoin, xopen
+from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError
+from .features import Features
+from .filesystems import (
+ is_remote_filesystem,
+ rename,
+)
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
+from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
+from .keyhash import DuplicatedKeysError
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
+from .splits import Split, SplitDict, SplitGenerator, SplitInfo
+from .streaming import extend_dataset_builder_for_streaming
+from .table import CastError
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import cached_path, is_remote_url
+from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
+from .utils.py_utils import (
+ classproperty,
+ convert_file_size_to_int,
+ has_sufficient_disk_space,
+ iflatmap_unordered,
+ map_nested,
+ memoize,
+ size_str,
+ temporary_assignment,
+)
+from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
+from .utils.track import tracked_list
+
+
+if TYPE_CHECKING:
+ from .load import DatasetModule
+
+
+logger = logging.get_logger(__name__)
+
+
+class InvalidConfigName(ValueError):
+ pass
+
+
+@dataclass
+class BuilderConfig:
+ """Base class for `DatasetBuilder` data configuration.
+
+ `DatasetBuilder` subclasses with data configuration options should subclass
+ `BuilderConfig` and add their own properties.
+
+ Attributes:
+ name (`str`, defaults to `default`):
+ The name of the configuration.
+ version (`Version` or `str`, defaults to `0.0.0`):
+ The version of the configuration.
+ data_dir (`str`, *optional*):
+ Path to the directory containing the source data.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ description (`str`, *optional*):
+ A human description of the configuration.
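+
+    Example (a minimal, illustrative sketch of a dataset-specific config; the
+    `language` field is made up and not part of the base class):
+
+    ```py
+    >>> from dataclasses import dataclass
+    >>> import datasets
+    >>> @dataclass
+    ... class DemoConfig(datasets.BuilderConfig):
+    ...     language: str = "en"
+    >>> DemoConfig(name="demo", language="fr").language
+    'fr'
+    ```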
+ """
+
+ name: str = "default"
+ version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
+ data_dir: Optional[str] = None
+ data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
+ description: Optional[str] = None
+
+ def __post_init__(self):
+ # The config name is used to name the cache directory.
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in self.name:
+ raise InvalidConfigName(
+ f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
+ f"They could create issues when creating a directory for this config on Windows filesystem."
+ )
+ if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
+ raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+ def __eq__(self, o):
+ # we need to override the default dataclass __eq__ since it doesn't check for
+        # other attributes than the ones in the signature.
+ if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+ return False
+ return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[Features] = None,
+ ) -> str:
+ """
+ The config id is used to build the cache directory.
+ By default it is equal to the config name.
+        However, the config name alone is not a unique identifier for the dataset being generated,
+ since it doesn't take into account:
+ - the config kwargs that can be used to overwrite attributes
+ - the custom features used to write the dataset
+ - the data_files for json/text/csv/pandas datasets
+
+ Therefore the config id is just the config name with an optional suffix based on these.
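+
+        Example (illustrative sketch; the exact suffix depends on the kwargs and on
+        path normalization of `data_dir`):
+
+        ```py
+        >>> from datasets import BuilderConfig
+        >>> config = BuilderConfig(name="default")
+        >>> config.create_config_id({"data_dir": "./my_data"})
+        'default-data_dir=my_data'
+        ```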
+ """
+ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+ suffix: Optional[str] = None
+ config_kwargs_to_add_to_suffix = config_kwargs.copy()
+ # name and version are already used to build the cache directory
+ config_kwargs_to_add_to_suffix.pop("name", None)
+ config_kwargs_to_add_to_suffix.pop("version", None)
+ # data dir handling (when specified it points to the manually downloaded data):
+ # it was previously ignored before the introduction of config id because we didn't want
+ # to change the config name. Now it's fine to take it into account for the config id.
+ # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if "data_dir" in config_kwargs_to_add_to_suffix:
+ if config_kwargs_to_add_to_suffix["data_dir"] is None:
+ config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ else:
+ # canonicalize the data dir to avoid two paths to the same location having different
+ # hashes
+ data_dir = config_kwargs_to_add_to_suffix["data_dir"]
+ data_dir = os.path.normpath(data_dir)
+ config_kwargs_to_add_to_suffix["data_dir"] = data_dir
+ if config_kwargs_to_add_to_suffix:
+ # we don't care about the order of the kwargs
+ config_kwargs_to_add_to_suffix = {
+ k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+ }
+ if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+ suffix = ",".join(
+ str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+ )
+ if len(suffix) > 32: # hash if too long
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+ else:
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+ if custom_features is not None:
+ m = Hasher()
+ if suffix:
+ m.update(suffix)
+ m.update(custom_features)
+ suffix = m.hexdigest()
+
+ if suffix:
+ config_id = self.name + "-" + suffix
+ if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+ config_id = self.name + "-" + Hasher.hash(suffix)
+ return config_id
+ else:
+ return self.name
+
+ def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
+ if isinstance(self.data_files, DataFilesPatternsDict):
+ base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
+ self.data_files = self.data_files.resolve(base_path, download_config)
+
+
+class DatasetBuilder:
+ """Abstract base class for all datasets.
+
+ `DatasetBuilder` has 3 key methods:
+
+ - [`DatasetBuilder.info`]: Documents the dataset, including feature
+ names, types, shapes, version, splits, citation, etc.
+ - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+ and writes it to disk.
+ - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+ Some `DatasetBuilder`s expose multiple variants of the
+ dataset by defining a [`BuilderConfig`] subclass and accepting a
+ config object (or name) on construction. Configurable datasets expose a
+ pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+ Args:
+ cache_dir (`str`, *optional*):
+ Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+ dataset_name (`str`, *optional*):
+ Name of the dataset, if different from the builder name. Useful for packaged builders
+ like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
+ that use the same packaged builder.
+ config_name (`str`, *optional*):
+ Name of the dataset configuration.
+ It affects the data generated on disk. Different configurations will have their own subdirectories and
+ versions.
+ If not provided, the default configuration is used (if it exists).
+
+
+            <Added version="2.3.0">
+
+            Parameter `name` was renamed to `config_name`.
+
+            </Added>
+
+ hash (`str`, *optional*):
+ Hash specific to the dataset code. Used to update the caching directory when the
+ dataset loading script code is updated (to avoid reusing old data).
+ The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files.
+ This can be a remote URL.
+ features ([`Features`], *optional*):
+ Features types to use with this dataset.
+ It can be used to change the [`Features`] types of a dataset, for example.
+ token (`str` or `bool`, *optional*):
+ String or boolean to use as Bearer token for remote files on the
+ Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+ repo_id (`str`, *optional*):
+ ID of the dataset repository.
+ Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+ and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ For builders like "csv" or "json" that need the user to specify data files. They can be either
+ local or remote files. For convenience, you can use a `DataFilesDict`.
+ data_dir (`str`, *optional*):
+ Path to directory containing source data file(s).
+ Use only if `data_files` is not passed, in which case it is equivalent to passing
+ `os.path.join(data_dir, "**")` as `data_files`.
+ For builders that require manual download, it must be the path to the local directory containing the
+ manually downloaded data.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ writer_batch_size (`int`, *optional*):
+ Batch size used by the ArrowWriter.
+ It defines the number of samples that are kept in memory before writing them
+ and also the length of the arrow chunks.
+ None means that the ArrowWriter will use its default value.
+ name (`str`): Configuration name for the dataset.
+
+            <Deprecated version="2.3.0">
+
+            Use `config_name` instead.
+
+            </Deprecated>
+
+ **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+ configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+ configuration class is [`BuilderConfig`] or a subclass of it.
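+
+    Example (typical end-to-end flow; `rotten_tomatoes` is only an illustrative public
+    dataset, any other builder works the same way):
+
+    ```py
+    >>> from datasets import load_dataset_builder
+    >>> builder = load_dataset_builder("rotten_tomatoes")
+    >>> features = builder.info.features  # documented by `DatasetBuilder.info`
+    >>> builder.download_and_prepare()  # downloads the data and writes the Arrow files
+    >>> ds = builder.as_dataset(split="train")  # loads the prepared split as a `Dataset`
+    ```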
+ """
+
+ # Default version
+ VERSION = None # Default version set in BuilderConfig
+
+ # Class for the builder config.
+ BUILDER_CONFIG_CLASS = BuilderConfig
+
+ # Named configurations that modify the data generated by download_and_prepare.
+ BUILDER_CONFIGS = []
+
+ # Optional default config name to be used when name is None
+ DEFAULT_CONFIG_NAME = None
+
+ # Default batch size used by the ArrowWriter
+ # It defines the number of samples that are kept in memory before writing them
+ # and also the length of the arrow chunks
+ # None means that the ArrowWriter will use its default value
+ DEFAULT_WRITER_BATCH_SIZE = None
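+
+    # Illustrative sketch (not part of the library): a dataset script typically subclasses
+    # `GeneratorBasedBuilder` (defined later in this module) and fills in these class
+    # attributes, for example:
+    #
+    #     class MyDataset(datasets.GeneratorBasedBuilder):
+    #         VERSION = datasets.Version("1.0.0")
+    #         BUILDER_CONFIGS = [datasets.BuilderConfig(name="en"), datasets.BuilderConfig(name="fr")]
+    #         DEFAULT_CONFIG_NAME = "en"
+    #         DEFAULT_WRITER_BATCH_SIZE = 1000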
+
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[DatasetInfo] = None,
+ features: Optional[Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ # DatasetBuilder name
+ self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
+ self.hash: Optional[str] = hash
+ self.base_path = base_path
+ self.token = token
+ # For backwards compatibility (e.g. if accessed in a dataset script)
+ self.use_auth_token = token
+ self.repo_id = repo_id
+ self.storage_options = storage_options or {}
+ self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
+ self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+ if data_files is not None and not isinstance(data_files, DataFilesDict):
+ data_files = DataFilesDict.from_patterns(
+ sanitize_patterns(data_files),
+ base_path=base_path,
+ download_config=DownloadConfig(token=token, storage_options=self.storage_options),
+ )
+
+ # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
+ if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+ config_kwargs["features"] = features
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ self.config_kwargs = config_kwargs
+ self.config, self.config_id = self._create_builder_config(
+ config_name=config_name,
+ custom_features=features,
+ **config_kwargs,
+ )
+
+ # prepare info: DatasetInfo are a standardized dataclass across all datasets
+ # Prefill datasetinfo
+ if info is None:
+ # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
+ info = self.get_exported_dataset_info()
+ info.update(self._info())
+ info.builder_name = self.name
+ info.dataset_name = self.dataset_name
+ info.config_name = self.config.name
+ info.version = self.config.version
+ self.info = info
+ # update info with user specified infos
+ if features is not None:
+ self.info.features = features
+
+ # Prepare data dirs:
+ # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+ self._cache_dir_root = (
+ self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+ )
+ self._cache_downloaded_dir = (
+ posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+ if cache_dir
+ else str(config.DOWNLOADED_DATASETS_PATH)
+ )
+ self._cache_downloaded_dir = (
+ self._cache_downloaded_dir
+ if is_remote_url(self._cache_downloaded_dir)
+ else os.path.expanduser(self._cache_downloaded_dir)
+ )
+
+ # In case there exists a legacy cache directory
+ self._legacy_relative_data_dir = None
+
+ self._cache_dir = self._build_cache_dir()
+ if not is_remote_url(self._cache_dir_root):
+ os.makedirs(self._cache_dir_root, exist_ok=True)
+ lock_path = os.path.join(
+ self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
+ )
+ with FileLock(lock_path):
+ if os.path.exists(self._cache_dir): # check if data exist
+ if len(os.listdir(self._cache_dir)) > 0:
+ if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+ logger.info("Overwrite dataset info from restored data version if exists.")
+ self.info = DatasetInfo.from_directory(self._cache_dir)
+ else: # dir exists but no data, remove the empty dir as data aren't available anymore
+ logger.warning(
+ f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
+ )
+ os.rmdir(self._cache_dir)
+
+ # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
+ self._output_dir = self._cache_dir
+ self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
+
+ # Set download manager
+ self.dl_manager = None
+
+ # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
+ self._record_infos = False
+
+ # Set in `.download_and_prepare` once the format of the generated dataset is known
+ self._file_format = None
+
+ # Enable streaming (e.g. it patches "open" to work with remote files)
+ extend_dataset_builder_for_streaming(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-enable streaming, since patched functions are not kept when pickling
+ extend_dataset_builder_for_streaming(self)
+
+ # Must be set for datasets that use 'data_dir' functionality - the ones
+ # that require users to do additional steps to download the data
+ # (this is usually due to some external regulations / rules).
+ # This field should contain a string with user instructions, including
+ # the list of files that should be present. It will be
+ # displayed in the dataset documentation.
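+    # Illustrative sketch (hypothetical dataset): a builder could override it as
+    #
+    #     @property
+    #     def manual_download_instructions(self):
+    #         return (
+    #             "Request access at https://example.com, download archive.zip, unpack it, "
+    #             "and pass the folder via `load_dataset(..., data_dir=...)`."
+    #         )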
+ @property
+ def manual_download_instructions(self) -> Optional[str]:
+ return None
+
+ def _check_legacy_cache(self) -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and self.config.name == "default"
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
+ config_id = config_name + self.config_id[len(self.config.name) :]
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and not (set(self.config_kwargs) - {"data_files", "data_dir"})
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+ from .utils._dill import Pickler
+
+ def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
+ """
+ Used to update hash of packaged modules which is used for creating unique cache directories to reflect
+ different config parameters which are passed in metadata from readme.
+ """
+ params_to_exclude = {"config_name", "version", "description"}
+ params_to_add_to_hash = {
+ param: value
+ for param, value in sorted(config_parameters.items())
+ if param not in params_to_exclude
+ }
+ m = Hasher()
+ m.update(hash)
+ m.update(params_to_add_to_hash)
+ return m.hexdigest()
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
+ config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ if (
+ dataset_module.builder_configs_parameters.metadata_configs
+ and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
+ ):
+ hash = update_hash_with_config_parameters(
+ hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
+ )
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ @classmethod
+ def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
+ """Empty dict if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_all_exported_dataset_infos()
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
+ ```
+ """
+ return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
+
+ def get_exported_dataset_info(self) -> DatasetInfo:
+ """Empty `DatasetInfo` if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_exported_dataset_info()
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
+ ```
+ """
+ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
+
+ def _create_builder_config(
+ self, config_name=None, custom_features=None, **config_kwargs
+ ) -> Tuple[BuilderConfig, str]:
+ """Create and validate BuilderConfig object as well as a unique config id for this config.
+ Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
+        config_kwargs override the default kwargs of the config.
+ """
+ builder_config = None
+
+ # try default config
+ if config_name is None and self.BUILDER_CONFIGS:
+ if self.DEFAULT_CONFIG_NAME is not None:
+ builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
+ logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
+ else:
+ if len(self.BUILDER_CONFIGS) > 1:
+ if not config_kwargs:
+ example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
+ raise ValueError(
+ "Config name is missing."
+ f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ + f"\nExample of usage:\n\t`{example_of_usage}`"
+ )
+ else:
+ builder_config = self.BUILDER_CONFIGS[0]
+ logger.info(
+ f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
+ )
+
+ # try to get config by name
+ if isinstance(config_name, str):
+ builder_config = self.builder_configs.get(config_name)
+ if builder_config is None and self.BUILDER_CONFIGS:
+ raise ValueError(
+ f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
+ )
+
+ # if not using an existing config, then create a new config on the fly
+ if not builder_config:
+ if config_name is not None:
+ config_kwargs["name"] = config_name
+ elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+ # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+ config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+ if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+ config_kwargs["version"] = self.VERSION
+ builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+ # otherwise use the config_kwargs to overwrite the attributes
+ else:
+ builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
+ for key, value in config_kwargs.items():
+ if value is not None:
+ if not hasattr(builder_config, key):
+ raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+ setattr(builder_config, key, value)
+
+ if not builder_config.name:
+ raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+ # resolve data files if needed
+ builder_config._resolve_data_files(
+ base_path=self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ )
+
+ # compute the config id that is going to be used for caching
+ config_id = builder_config.create_config_id(
+ config_kwargs,
+ custom_features=custom_features,
+ )
+ is_custom = (config_id not in self.builder_configs) and config_id != "default"
+ if is_custom:
+ logger.info(f"Using custom data configuration {config_id}")
+ else:
+ if (
+ builder_config.name in self.builder_configs
+ and builder_config != self.builder_configs[builder_config.name]
+ ):
+ raise ValueError(
+ "Cannot name a custom BuilderConfig the same as an available "
+ f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+ )
+ if not builder_config.version:
+ raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+
+ return builder_config, config_id
+
+ @classproperty
+ @classmethod
+ @memoize()
+ def builder_configs(cls) -> Dict[str, BuilderConfig]:
+ """Dictionary of pre-defined configurations for this builder class."""
+ configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+ if len(configs) != len(cls.BUILDER_CONFIGS):
+ names = [config.name for config in cls.BUILDER_CONFIGS]
+ raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+ return configs
+
+ @property
+ def cache_dir(self):
+ return self._cache_dir
+
+ def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
+ # Check for the legacy cache directory template (datasets<3.0.0)
+ self._legacy_relative_data_dir = (
+ self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
+ )
+ self._cache_dir = self._build_cache_dir()
+ self._output_dir = self._cache_dir
+
+ def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
+ """Relative path of this dataset in cache_dir:
+ Will be:
+            self.dataset_name/self.config_id/self.config.version/self.hash/
+        or if a repo_id with a namespace has been specified:
+            self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
+        If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ if self._legacy_relative_data_dir is not None and with_version and with_hash:
+ return self._legacy_relative_data_dir
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
+ builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
+ if with_version:
+ builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
+ if with_hash and self.hash and isinstance(self.hash, str):
+ builder_data_dir = posixpath.join(builder_data_dir, self.hash)
+ return builder_data_dir
+
+ def _build_cache_dir(self):
+ """Return the data directory for the current version."""
+ builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
+ version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
+
+ def _other_versions_on_disk():
+ """Returns previous versions on disk."""
+ if not os.path.exists(builder_data_dir):
+ return []
+
+ version_dirnames = []
+ for dir_name in os.listdir(builder_data_dir):
+ try:
+ version_dirnames.append((utils.Version(dir_name), dir_name))
+ except ValueError: # Invalid version (ex: incomplete data dir)
+ pass
+ version_dirnames.sort(reverse=True)
+ return version_dirnames
+
+ # Check and warn if other versions exist
+ if not is_remote_url(builder_data_dir):
+ version_dirs = _other_versions_on_disk()
+ if version_dirs:
+ other_version = version_dirs[0][0]
+ if other_version != self.config.version:
+ warn_msg = (
+ f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
+ f"cache_dir {self._cache_dir_root}. Using currently defined version "
+ f"{str(self.config.version)}."
+ )
+ logger.warning(warn_msg)
+
+ return version_data_dir
+
+ @abc.abstractmethod
+ def _info(self) -> DatasetInfo:
+ """Construct the DatasetInfo object. See `DatasetInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (DatasetInfo) The dataset information
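+
+        Example (illustrative sketch of a typical implementation in a dataset script;
+        the description and features below are made up):
+
+        ```py
+        def _info(self):
+            return datasets.DatasetInfo(
+                description="My dataset of labelled sentences.",
+                features=datasets.Features(
+                    {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=["neg", "pos"])}
+                ),
+            )
+        ```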
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_imported_module_dir(cls):
+ """Return the path of the module of this class or subclass."""
+ return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
+
+ def _rename(self, src: str, dst: str):
+ rename(self._fs, src, dst)
+
+ def download_and_prepare(
+ self,
+ output_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ try_from_hf_gcs="deprecated",
+ dl_manager: Optional[DownloadManager] = None,
+ base_path: Optional[str] = None,
+ use_auth_token="deprecated",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **download_and_prepare_kwargs,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ output_dir (`str`, *optional*):
+ Output directory for the dataset.
+                Defaults to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+
+
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+                Select the download/generate mode; defaults to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+                <Deprecated version="2.9.1">
+
+                `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+                Please use `verification_mode` instead.
+
+                </Deprecated>
+
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+
+                <Deprecated version="2.16.0">
+
+                `try_from_hf_gcs` was deprecated in version 2.16.0 and will be removed in 3.0.0.
+                Host the processed files on the Hugging Face Hub instead.
+
+                </Deprecated>
+
+ dl_manager (`DownloadManager`, *optional*):
+                Specific `DownloadManager` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote url.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from ~/.huggingface.
+
+                <Deprecated version="2.7.1">
+
+                Pass `use_auth_token` to `load_dataset_builder` instead.
+
+                </Deprecated>
+
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+                Supported formats: "arrow", "parquet". Defaults to "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+
+
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+
+
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
+
+ Example:
+
+ Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare()
+ ```
+
+ Download and prepare the dataset as sharded Parquet files locally:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("./output_dir", file_format="parquet")
+ ```
+
+ Download and prepare the dataset as sharded Parquet files in a cloud storage:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ else:
+ token = self.token
+
+ if try_from_hf_gcs != "deprecated":
+ warnings.warn(
+ "'try_from_hf_gcs' was deprecated in version 2.16.0 and will be removed in 3.0.0.",
+ FutureWarning,
+ )
+ else:
+ try_from_hf_gcs = False
+
+ output_dir = output_dir if output_dir is not None else self._cache_dir
+ # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ fs, output_dir = url_to_fs(output_dir, **(storage_options or {}))
+ self._fs = fs
+ self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+ base_path = base_path if base_path is not None else self.base_path
+
+ if file_format is not None and file_format not in ["arrow", "parquet"]:
+ raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
+ self._file_format = file_format
+
+ if self._fs._strip_protocol(self._output_dir) == "":
+ # We don't support the root directory, because it has no dirname,
+ # and we need a dirname to use a .incomplete directory
+ # when the dataset is being written
+ raise RuntimeError(
+ f"Unable to download and prepare the dataset at the root {self._output_dir}. "
+ f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
+ )
+
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig(
+ cache_dir=self._cache_downloaded_dir,
+ force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ use_etag=False,
+ num_proc=num_proc,
+ token=token,
+ storage_options=self.storage_options,
+ ) # We don't use etag for data files to speed up the process
+
+ dl_manager = DownloadManager(
+ dataset_name=self.dataset_name,
+ download_config=download_config,
+ data_dir=self.config.data_dir,
+ base_path=base_path,
+ record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
+ )
+
+ is_local = not is_remote_filesystem(self._fs)
+
+ if (
+ isinstance(dl_manager, MockDownloadManager)
+ or not is_local
+ or file_format != "arrow"
+ or max_shard_size is not None
+ ):
+ try_from_hf_gcs = False
+ self.dl_manager = dl_manager
+
+ # Prevent parallel local disk operations
+ if is_local:
+ # Create parent directory of the output_dir to put the lock file in there
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ lock_path = self._output_dir + "_builder.lock"
+
+ # File locking only with local paths; no file locking on GCS or S3
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ # Check if the data already exists
+ data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
+ if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
+ logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
+ # We need to update the info in case some splits were added in the meantime
+ # for example when calling load_dataset from multiple workers.
+ self.info = self._load_info()
+ self.download_post_processing_resources(dl_manager)
+ return
+
+ logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
+ if is_local: # if cache dir is local, check for available space
+ if not has_sufficient_disk_space(
+ self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
+ ):
+ raise OSError(
+ f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
+ )
+
+ @contextlib.contextmanager
+ def incomplete_dir(dirname):
+ """Create temporary dir for dirname and rename on exit."""
+ if not is_local:
+ self._fs.makedirs(dirname, exist_ok=True)
+ yield dirname
+ else:
+ tmp_dir = dirname + ".incomplete"
+ os.makedirs(tmp_dir, exist_ok=True)
+ try:
+ yield tmp_dir
+ if os.path.isdir(dirname):
+ shutil.rmtree(dirname)
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
+ shutil.move(tmp_dir, dirname)
+ finally:
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
+
+            # This message is intentionally logged right before the progress bar so the user
+            # has the information needed to cancel the download/preparation if needed.
+ if self.info.size_in_bytes:
+ logger.info(
+ f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
+ f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
+ f"post-processed: {size_str(self.info.post_processing_size)}, "
+ f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
+ )
+ else:
+ _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
+ logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
+
+ self._check_manual_download(dl_manager)
+
+ # Create a tmp dir and rename to self._output_dir on successful exit.
+ with incomplete_dir(self._output_dir) as tmp_output_dir:
+ # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward
+ # it to every sub function.
+ with temporary_assignment(self, "_output_dir", tmp_output_dir):
+ # Try to download the already prepared dataset files
+ downloaded_from_gcs = False
+ if try_from_hf_gcs:
+ try:
+ self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ downloaded_from_gcs = True
+ except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
+ logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
+ except ConnectionError:
+ logger.warning("HF google storage unreachable. Downloading and preparing it from source")
+ if not downloaded_from_gcs:
+ prepare_split_kwargs = {"file_format": file_format}
+ if max_shard_size is not None:
+ prepare_split_kwargs["max_shard_size"] = max_shard_size
+ if num_proc is not None:
+ prepare_split_kwargs["num_proc"] = num_proc
+ self._download_and_prepare(
+ dl_manager=dl_manager,
+ verification_mode=verification_mode,
+ **prepare_split_kwargs,
+ **download_and_prepare_kwargs,
+ )
+ # Sync info
+ self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
+ self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
+ self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
+ # Save info
+ self._save_info()
+
+ # Download post processing resources
+ self.download_post_processing_resources(dl_manager)
+
+ logger.info(
+ f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
+ f"Subsequent calls will reuse this data."
+ )
+
+ def _check_manual_download(self, dl_manager):
+ if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
+ raise ManualDownloadError(
+ textwrap.dedent(
+ f"""\
+ The dataset {self.dataset_name} with config {self.config.name} requires manual data.
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+                    datasets.load_dataset("{self.dataset_name}", data_dir="<path/to/manual/data>")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+ logger.info(f"Couldn't download resourse file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+ This is the internal implementation to overwrite called when user calls
+ `download_and_prepare`. It should download all required data and generate
+ the pre-processed datasets files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.dataset_name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+ "`all` is a special split keyword corresponding to the "
+ "union of all splits, so cannot be used as key in "
+ "._split_generator()."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated to the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+            # If check_duplicates is set to True, catch DuplicatedKeysError and re-raise it with a fix message
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+                if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ file_lock = (
+ FileLock(self._output_dir + "_info.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ file_lock = (
+ FileLock(self._output_dir + "_infos.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Whether to ignore the verifications of the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+                <Deprecated version="2.9.1">
+
+                `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+                Please use `verification_mode` instead.
+
+                </Deprecated>
+
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ datasets.Dataset
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+            verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if self._file_format is not None and self._file_format != "arrow":
+ raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
+ if self.info.post_processed is None:
+ self.info.post_processed = PostProcessedInfo()
+ if self.info.post_processed.resources_checksums is None:
+ self.info.post_processed.resources_checksums = {}
+ self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
+ self.info.post_processing_size = sum(
+ checksums_dict["num_bytes"]
+ for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
+ for checksums_dict in split_checksums_dicts.values()
+ )
+ if self.info.dataset_size is not None and self.info.download_size is not None:
+ self.info.size_in_bytes = (
+ self.info.dataset_size + self.info.download_size + self.info.post_processing_size
+ )
+ self._save_info()
+ ds._info.post_processed = self.info.post_processed
+ ds._info.post_processing_size = self.info.post_processing_size
+ ds._info.size_in_bytes = self.info.size_in_bytes
+ if self.info.post_processed.features is not None:
+ if self.info.post_processed.features.type != ds.features.type:
+ raise ValueError(
+ f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
+ )
+ else:
+ ds.info.features = self.info.post_processed.features
+
+ return ds
+
+ def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
+ """Constructs a `Dataset`.
+
+ This is the internal implementation to overwrite called when user calls
+ `as_dataset`. It should read the pre-processed datasets files and generate
+ the `Dataset` object.
+
+ Args:
+ split (`datasets.Split`):
+ which subset of the data to read.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset`
+ """
+ cache_dir = self._fs._strip_protocol(self._output_dir)
+ dataset_name = self.dataset_name
+ if self._check_legacy_cache():
+ dataset_name = self.name
+ dataset_kwargs = ArrowReader(cache_dir, self.info).read(
+ name=dataset_name,
+ instructions=split,
+ split_infos=self.info.splits.values(),
+ in_memory=in_memory,
+ )
+ fingerprint = self._get_dataset_fingerprint(split)
+ return Dataset(fingerprint=fingerprint, **dataset_kwargs)
+
+ def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
+ """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
+ hasher = Hasher()
+ hasher.update(Path(self._relative_data_dir()).as_posix())
+ hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
+ fingerprint = hasher.hexdigest()
+ return fingerprint
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
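+        """Create an [`IterableDataset`] (or one per split) that streams examples directly from the source files.
+
+        Example (illustrative; assumes the builder's data files can be streamed):
+
+        ```py
+        >>> from datasets import load_dataset_builder
+        >>> builder = load_dataset_builder("rotten_tomatoes")
+        >>> ids = builder.as_streaming_dataset(split="train")
+        >>> example = next(iter(ids))
+        ```
+        """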
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(
+ f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
+ )
+
+ dl_manager = StreamingDownloadManager(
+ base_path=base_path or self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ dataset_name=self.dataset_name,
+ data_dir=self.config.data_dir,
+ )
+ self._check_manual_download(dl_manager)
+ splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
+ # By default, return all splits
+ if split is None:
+ splits_generator = splits_generators
+ elif split in splits_generators:
+ splits_generator = splits_generators[split]
+ else:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ self._as_streaming_dataset_single,
+ splits_generator,
+ map_tuple=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _as_streaming_dataset_single(
+ self,
+ splits_generator,
+ ) -> IterableDataset:
+ ex_iterable = self._get_examples_iterable_for_split(splits_generator)
+ # add auth to be able to access and decode audio/image files from private repositories.
+ token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
+ return IterableDataset(
+ ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ )
+
+ def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
+ """Run dataset transforms or add indexes"""
+ return None
+
+ def _post_processing_resources(self, split: str) -> Dict[str, str]:
+ """Mapping resource_name -> resource_file_name"""
+ return {}
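+    # Illustrative sketch (not part of the library): a builder that attaches e.g. a FAISS
+    # index in `_post_process` could declare its resource files like this, where the
+    # "embeddings_index" name and file name are hypothetical:
+    #
+    #     def _post_processing_resources(self, split: str) -> Dict[str, str]:
+    #         return {"embeddings_index": f"{split}_embeddings_index.faiss"}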
+
+ def _download_post_processing_resources(
+ self, split: str, resource_name: str, dl_manager: DownloadManager
+ ) -> Optional[str]:
+ """Download the resource using the download manager and return the downloaded path."""
+ return None
+
+ @abc.abstractmethod
+ def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
+ """Specify feature dictionary generators and dataset splits.
+
+ This function returns a list of `SplitGenerator`s defining how to generate
+ data and what splits to use.
+
+ Example:
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={'file': 'train_data.zip'},
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={'file': 'test_data.zip'},
+ ),
+ ]
+
+ The above code will first call `_generate_examples(file='train_data.zip')`
+ to write the train data, then `_generate_examples(file='test_data.zip')` to
+ write the test data.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ Note that for datasets without a `VALIDATION` split, you can use a
+ fraction of the `TRAIN` data for evaluation as you iterate on your model
+ so as not to overfit to the `TEST` data.
+
+ For downloads and extractions, use the given `download_manager`.
+ Note that the `DownloadManager` caches downloads, so it is fine to have each
+ generator attempt to download the source data.
+
+ A good practice is to download all data in this function, and then
+        distribute the relevant parts to each split with the `gen_kwargs` argument.
+
+ Args:
+ dl_manager (`Union[DownloadManager, StreamingDownloadManager]`):
+ Download manager to download the data
+
+ Returns:
+            `list` of `SplitGenerator`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+                Format of the data files in which the dataset will be written.
+                Supported formats: "arrow", "parquet". Defaults to the "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                    same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
+ """
+ raise NotImplementedError()
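+    # Illustrative sketch (not part of the library): a subclass reading a JSON Lines
+    # file could implement `_generate_examples` like this, where the `filepath`
+    # gen_kwarg and the "text" field are hypothetical:
+    #
+    #     def _generate_examples(self, filepath):
+    #         import json
+    #         with open(filepath, encoding="utf-8") as f:
+    #             for line_number, line in enumerate(f):
+    #                 record = json.loads(line)
+    #                 # the line number is a unique and deterministic key
+    #                 yield line_number, {"text": record["text"]}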
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ check_duplicate_keys: bool,
+ file_format="arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[int, str]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ "split_info": split_info,
+ "check_duplicate_keys": check_duplicate_keys,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shards_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self,
+ gen_kwargs: dict,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ split_info: SplitInfo,
+ check_duplicate_keys: bool,
+ job_id: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ generator = self._generate_examples(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for key, record in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ example = self.info.features.encode_example(record) if self.info.features is not None else record
+ writer.write(example, key)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+            # Ignore the writer's "no examples written" error if it was caused by an error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
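+    # For reference: this generator yields (job_id, done, content) tuples. While `done`
+    # is False, `content` is an incremental example count used to refresh the progress
+    # bar; the single final tuple with `done` set to True carries
+    # (total_num_examples, total_num_bytes, features, num_shards, shard_lengths).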
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ super()._download_and_prepare(
+ dl_manager,
+ verification_mode,
+ check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
+ or verification_mode == VerificationMode.ALL_CHECKS,
+ **prepare_splits_kwargs,
+ )
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
+
+
+class ArrowBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
+
+ @abc.abstractmethod
+ def _generate_tables(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                    same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `pyarrow.Table`, a feature table
+ ready to be encoded and written to disk.
+ """
+ raise NotImplementedError()
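+    # Illustrative sketch (not part of the library): a subclass reading CSV files could
+    # implement `_generate_tables` like this, where the `files` gen_kwarg is hypothetical:
+    #
+    #     import pyarrow.csv as pac
+    #
+    #     def _generate_tables(self, files):
+    #         for file_idx, file in enumerate(files):
+    #             # the file index is a unique and deterministic key
+    #             yield file_idx, pac.read_csv(file)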
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[str, int]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ try:
+ split_info = self.info.splits[split_generator.name]
+ except Exception:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_id_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_id_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shard_ids_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
+ generator = self._generate_tables(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for _, table in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ writer.write_table(table)
+ except CastError as cast_error:
+ raise DatasetGenerationCastError.from_cast_error(
+ cast_error=cast_error,
+ builder_name=self.info.builder_name,
+ gen_kwargs=gen_kwargs,
+ token=self.token,
+ )
+ num_examples_progress_update += len(table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+            # Ignore the writer's "no examples written" error if it was caused by an error in _generate_tables before the first table was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ if isinstance(e, DatasetGenerationError):
+ raise
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
+
+
+class MissingBeamOptions(ValueError):
+ pass
+
+
+@deprecated("Use `GeneratorBasedBuilder` or `ArrowBasedBuilder` instead.")
+class BeamBasedBuilder(DatasetBuilder):
+ """Beam-based Builder."""
+
+ def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
+ self._beam_runner = beam_runner
+ self._beam_options = beam_options
+ self._beam_writers = {} # {split: beam_writer} mapping.
+ super().__init__(*args, **kwargs)
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
+ # it's in the call signature of `_split_generators()`.
+ # This allows for global preprocessing in beam.
+ split_generators_kwargs = {}
+ split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
+ if "pipeline" in split_generators_arg_names:
+ split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
+ return split_generators_kwargs
+
+ @abc.abstractmethod
+ def _build_pcollection(self, pipeline, **kwargs):
+ """Build the beam pipeline examples for each `SplitGenerator`.
+
+ This function extracts examples from the raw data with parallel transforms
+ in a Beam pipeline. It is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples from the PCollection will be
+ encoded and written to disk.
+
+
+ Warning: When running in a distributed setup, make sure that the data
+ which will be read (download_dir, manual_dir,...) and written (cache_dir)
+        can be accessed by the worker jobs. The data should be located in a
+ shared filesystem, like GCS.
+
+
+ Args:
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam pipeline.
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs.
+
+ Returns:
+ `beam.PCollection`: Apache Beam PCollection containing the
+ example to send to `self.info.features.encode_example(...)`.
+
+ Example:
+
+ ```
+ def _build_pcollection(pipeline, extracted_dir=None):
+ return (
+ pipeline
+ | beam.Create(gfile.io.listdir(extracted_dir))
+ | beam.Map(_process_file)
+ )
+ ```
+ """
+ raise NotImplementedError()
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ # Create the Beam pipeline and forward it to `_prepare_split`
+ import apache_beam as beam
+
+ import datasets.utils.beam_utils as beam_utils
+
+ beam_runner = self._beam_runner
+ beam_options = self._beam_options
+
+ if not beam_runner and not beam_options:
+ usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
+ raise MissingBeamOptions(
+ "Trying to generate a dataset using Apache Beam, yet no Beam Runner "
+ "or PipelineOptions() has been provided in `load_dataset` or in the "
+ "builder arguments. For big datasets it has to run on large-scale data "
+ "processing tools like Dataflow, Spark, etc. More information about "
+ "Apache Beam runners at "
+ "https://beam.apache.org/documentation/runners/capability-matrix/"
+ "\nIf you really want to run it locally because you feel like the "
+ "Dataset is small enough, you can use the local beam runner called "
+ "`DirectRunner` (you may run out of memory). \nExample of usage: "
+ f"\n\t`{usage_example}`"
+ )
+ if self._writer_batch_size is not None:
+ logger.warning(
+ "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
+ )
+
+        # Beam type checking assumes that a transform's multiple outputs are of the same type,
+        # which is not our case. Plus it doesn't handle all types correctly, so we
+        # are better off without it.
+ pipeline_options = {"pipeline_type_check": False}
+ if "num_proc" in prepare_splits_kwargs:
+ num_workers = prepare_splits_kwargs.pop("num_proc")
+ pipeline_options["direct_num_workers"] = num_workers
+ pipeline_options["num_workers"] = num_workers
+ pipeline_options["direct_running_mode"] = "multi_processing"
+ # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
+ raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing it not supported yet.")
+ beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
+ # Use a single pipeline for all splits
+ pipeline = beam_utils.BeamPipeline(
+ runner=beam_runner,
+ options=beam_options,
+ )
+ super()._download_and_prepare(
+ dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
+ ) # TODO handle verification_mode in beam datasets
+ # Run pipeline
+ pipeline_results = pipeline.run()
+ pipeline_results.wait_until_finish()
+ metrics = pipeline_results.metrics()
+ # Update `info.splits`.
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ src_fpath = posixpath.join(self._output_dir, src_fname)
+ dst_fpath = posixpath.join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(f"{self._output_dir}/{config.DATASET_INFO_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with xopen(f"{self._output_dir}/{config.LICENSE_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+ "max_shard_size is not supported for Beam datasets."
+ "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples in filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which build a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+        _ = pipeline | split_name >> _build_pcollection()  # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ self._request_info_from_hf_gcs()
+ datasets = {
+ split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
+ for split in self.info.splits.values()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
+
+ def _generate_examples_from_hf_gcs(self, split: SplitInfo):
+ if split.shard_lengths:
+ num_shards = len(split.shard_lengths)
+ remote_prepared_urls = [
+ f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
+ for shard_id in range(num_shards)
+ ]
+ else:
+ remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
+ key = 0
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ for remote_prepared_url in remote_prepared_urls:
+ with xopen(remote_prepared_url, "rb", download_config=download_config) as f:
+ with pa.ipc.open_stream(f) as reader:
+ for record_batch in reader:
+ for record in record_batch.to_pylist():
+ yield key, record
+ key += 1
+
+ def _request_info_from_hf_gcs(self):
+ from .download.streaming_download_manager import xopen
+
+ remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
+ try:
+            download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(remote_dataset_info, download_config=download_config) as f:
+ import json
+
+ _info = json.load(f)
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ self.info.update(DatasetInfo.from_dict(_info))
+
+ @property
+ def _remote_cache_dir_from_hf_gcs(self):
+ relative_data_dir = self._relative_data_dir(with_hash=False)
+ return HF_GCP_BASE_URL + "/" + Path(relative_data_dir).as_posix()
diff --git a/venv/lib/python3.10/site-packages/datasets/config.py b/venv/lib/python3.10/site-packages/datasets/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9668dfbd91ef58dd12728cf52044ca03d49a92f6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/config.py
@@ -0,0 +1,272 @@
+import importlib
+import importlib.metadata
+import logging
+import os
+import platform
+from pathlib import Path
+from typing import Optional
+
+from packaging import version
+
+
+logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
+
+# Datasets
+S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
+CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
+REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
+
+# Metrics
+S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
+CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
+REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
+
+# Hub
+HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
+HUB_DEFAULT_VERSION = "main"
+
+PY_VERSION = version.parse(platform.python_version())
+
+# General environment variables accepted values for booleans
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
+
+
+# Imports
+DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
+PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
+PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
+HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
+
+TORCH_VERSION = "N/A"
+TORCH_AVAILABLE = False
+
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
+ if TORCH_AVAILABLE:
+ try:
+ TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling PyTorch because USE_TF is set")
+
+POLARS_VERSION = "N/A"
+POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None
+
+if POLARS_AVAILABLE:
+ try:
+ POLARS_VERSION = version.parse(importlib.metadata.version("polars"))
+ logger.info(f"Polars version {POLARS_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+
+TF_VERSION = "N/A"
+TF_AVAILABLE = False
+
+if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
+ if TF_AVAILABLE:
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+ for package in [
+ "tensorflow",
+ "tensorflow-cpu",
+ "tensorflow-gpu",
+ "tf-nightly",
+ "tf-nightly-cpu",
+ "tf-nightly-gpu",
+ "intel-tensorflow",
+ "tensorflow-rocm",
+ "tensorflow-macos",
+ ]:
+ try:
+ TF_VERSION = version.parse(importlib.metadata.version(package))
+ except importlib.metadata.PackageNotFoundError:
+ continue
+ else:
+ break
+ else:
+ TF_AVAILABLE = False
+ if TF_AVAILABLE:
+ if TF_VERSION.major < 2:
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
+ TF_AVAILABLE = False
+ else:
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
+else:
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+JAX_VERSION = "N/A"
+JAX_AVAILABLE = False
+
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
+ if JAX_AVAILABLE:
+ try:
+ JAX_VERSION = version.parse(importlib.metadata.version("jax"))
+ logger.info(f"JAX version {JAX_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling JAX because USE_JAX is set to False")
+
+
+USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
+BEAM_VERSION = "N/A"
+BEAM_AVAILABLE = False
+if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ try:
+ BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
+ BEAM_AVAILABLE = True
+ logger.info(f"Apache Beam version {BEAM_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling Apache Beam because USE_BEAM is set to False")
+
+
+# Optional tools for data loading
+SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
+
+# Optional tools for feature decoding
+PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
+IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.0.31")
+IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.1.0")
+
+# Optional compression tools
+RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
+ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
+LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
+PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
+
+# Cache location
+DEFAULT_XDG_CACHE_HOME = "~/.cache"
+XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
+DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
+HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
+
+DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
+HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
+
+DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
+HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
+
+DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
+HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
+
+DOWNLOADED_DATASETS_DIR = "downloads"
+DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
+DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
+
+EXTRACTED_DATASETS_DIR = "extracted"
+DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
+EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
+
+# Download count for the website
+HF_UPDATE_DOWNLOAD_COUNTS = (
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
+)
+
+# For downloads and to check remote files metadata
+HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16
+
+# Remote dataset scripts support
+__HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
+HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
+ True
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
+ else False
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
+ else None
+)
+TIME_OUT_REMOTE_CODE = 15
+
+# Dataset viewer API
+USE_PARQUET_EXPORT = True
+
+# Batch size constants. For more info, see:
+# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations
+DEFAULT_MAX_BATCH_SIZE = 1000
+
+# Size of the preloaded record batch in `Dataset.__iter__`
+ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
+
+# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
+MAX_SHARD_SIZE = "500MB"
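+# Note: builders pass this through `convert_file_size_to_int`, so "500MB" amounts to
+# roughly 500_000_000 bytes of uncompressed data per shard.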
+
+# Parquet configuration
+PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
+
+# Offline mode
+HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+
+# Here, `True` will disable progress bars globally, without the possibility of enabling them
+# programmatically. `False` will enable them, without the possibility of disabling them.
+# If the environment variable is not set (None), then the user is free to enable/disable
+# them programmatically.
+# TL;DR: the env variable has priority over code
+__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+ else None
+)
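+# For example (values are matched case-insensitively):
+#   HF_DATASETS_DISABLE_PROGRESS_BARS="1" -> True  (progress bars always disabled)
+#   HF_DATASETS_DISABLE_PROGRESS_BARS="0" -> False (progress bars always enabled)
+#   unset                                 -> None  (left to the user to configure)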
+
+# In-memory
+DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
+IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
+
+# File names
+DATASET_ARROW_FILENAME = "dataset.arrow"
+DATASET_INDICES_FILENAME = "indices.arrow"
+DATASET_STATE_JSON_FILENAME = "state.json"
+DATASET_INFO_FILENAME = "dataset_info.json"
+DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
+LICENSE_FILENAME = "LICENSE"
+METRIC_INFO_FILENAME = "metric_info.json"
+DATASETDICT_JSON_FILENAME = "dataset_dict.json"
+METADATA_CONFIGS_FIELD = "configs"
+REPOCARD_FILENAME = "README.md"
+REPOYAML_FILENAME = ".huggingface.yaml"
+
+MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
+
+MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
+
+# Temporary cache directory prefix
+TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
+
+# Streaming
+STREAMING_READ_MAX_RETRIES = 20
+STREAMING_READ_RETRY_INTERVAL = 5
+
+# Datasets without script
+DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
+ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+
+# Progress bars
+PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
+
+# Maximum number of uploaded files per commit
+UPLOADS_MAX_NUMBER_PER_COMMIT = 50
+
+# Backward compatibility
+MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
diff --git a/venv/lib/python3.10/site-packages/datasets/data_files.py b/venv/lib/python3.10/site-packages/datasets/data_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..75fee776e5acd72671ce882db0b2543e65a4821a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/data_files.py
@@ -0,0 +1,821 @@
+import os
+import re
+from functools import partial
+from glob import has_magic
+from pathlib import Path, PurePath
+from typing import Callable, Dict, List, Optional, Set, Tuple, Union
+
+import huggingface_hub
+from fsspec.core import url_to_fs
+from fsspec.implementations.http import HTTPFileSystem
+from huggingface_hub import HfFileSystem
+from packaging import version
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .download import DownloadConfig
+from .naming import _split_re
+from .splits import Split
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import _prepare_path_and_storage_options, is_local_path, is_relative_path, xbasename, xjoin
+from .utils.py_utils import glob_pattern_to_regex, string_to_dict
+
+
+SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
+
+
+logger = logging.get_logger(__name__)
+
+
+class Url(str):
+ pass
+
+
+class EmptyDatasetError(FileNotFoundError):
+ pass
+
+
+SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
+
+SPLIT_KEYWORDS = {
+ Split.TRAIN: ["train", "training"],
+ Split.VALIDATION: ["validation", "valid", "dev", "val"],
+ Split.TEST: ["test", "testing", "eval", "evaluation"],
+}
+NON_WORDS_CHARS = "-._ 0-9"
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
+ "{keyword}/**",
+ "{keyword}[{sep}]*/**",
+ "**[{sep}/]{keyword}/**",
+ "**[{sep}/]{keyword}[{sep}]*/**",
+ ]
+elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/*[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"]
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
+ "{keyword}/**/*",
+ "{keyword}[{sep}]*/**/*",
+ "**/*[{sep}/]{keyword}/**/*",
+ "**/*[{sep}/]{keyword}[{sep}]*/**/*",
+ ]
+else:
+ KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/{keyword}[{sep}]*", "**/*[{sep}]{keyword}[{sep}]*"]
+ KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [
+ "**/{keyword}/**",
+ "**/{keyword}[{sep}]*/**",
+ "**/*[{sep}]{keyword}/**",
+ "**/*[{sep}]{keyword}[{sep}]*/**",
+ ]
+
+DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
+DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {
+ split: [
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
+ for keyword in SPLIT_KEYWORDS[split]
+ for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS
+ ]
+ for split in DEFAULT_SPLITS
+}
+DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {
+ split: [
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
+ for keyword in SPLIT_KEYWORDS[split]
+ for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS
+ ]
+ for split in DEFAULT_SPLITS
+}
+
+
+DEFAULT_PATTERNS_ALL = {
+ Split.TRAIN: ["**"],
+}
+
+ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
+ALL_DEFAULT_PATTERNS = [
+ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME,
+ DEFAULT_PATTERNS_SPLIT_IN_FILENAME,
+ DEFAULT_PATTERNS_ALL,
+]
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ METADATA_PATTERNS = [
+ "metadata.csv",
+ "**/metadata.csv",
+ "metadata.jsonl",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+else:
+ METADATA_PATTERNS = [
+ "**/metadata.csv",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+WILDCARD_CHARACTERS = "*[]"
+FILES_TO_IGNORE = [
+ "README.md",
+ "config.json",
+ "dataset_info.json",
+ "dataset_infos.json",
+ "dummy_data.zip",
+ "dataset_dict.json",
+]
+
+
+def contains_wildcards(pattern: str) -> bool:
+    return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
+
+
+def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
+ """
+ Take the data_files patterns from the user, and format them into a dictionary.
+    Each key is the name of the split, and each value is a list of data file patterns (paths or URLs).
+ The default split is "train".
+
+ Returns:
+ patterns: dictionary of split_name -> list of patterns
+ """
+ if isinstance(patterns, dict):
+ return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
+ elif isinstance(patterns, str):
+ return {SANITIZED_DEFAULT_SPLIT: [patterns]}
+ elif isinstance(patterns, list):
+ if any(isinstance(pattern, dict) for pattern in patterns):
+ for pattern in patterns:
+ if not (
+ isinstance(pattern, dict)
+ and len(pattern) == 2
+ and "split" in pattern
+ and isinstance(pattern.get("path"), (str, list))
+ ):
+ raise ValueError(
+ f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
+ )
+ splits = [pattern["split"] for pattern in patterns]
+ if len(set(splits)) != len(splits):
+ raise ValueError(f"Some splits are duplicated in data_files: {splits}")
+ return {
+ str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
+ for pattern in patterns
+ }
+ else:
+ return {SANITIZED_DEFAULT_SPLIT: patterns}
+ else:
+ return sanitize_patterns(list(patterns))
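+# Illustrative usage (hypothetical file names):
+#
+#     sanitize_patterns("data/*.csv")
+#     # -> {"train": ["data/*.csv"]}
+#     sanitize_patterns({"train": "train.csv", "test": ["test_0.csv", "test_1.csv"]})
+#     # -> {"train": ["train.csv"], "test": ["test_0.csv", "test_1.csv"]}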
+
+
+def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's inside a special directory
+ we ignore by default (if it starts with a double underscore).
+
+ Users can still explicitly request a filepath inside such a directory if "__pycache__" is
+ mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── __pycache__
+ └── b.txt
+
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
+ False
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
+ False
+ """
+    # We just need to check if every special directory from the path is present explicitly in the pattern.
+    # Since we assume that the path matches the pattern, it's equivalent to checking that both
+    # the parent path and the parent pattern have the same number of special directories.
+ data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
+ data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
+ return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
+
+
+def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
+ a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
+
+ Users can still explicitly request a filepath that is hidden or is inside a hidden directory
+ if the hidden part is mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── a.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
+ False
+ """
+    # We just need to check if every hidden part from the path is present explicitly in the pattern.
+    # Since we assume that the path matches the pattern, it's equivalent to checking that both
+    # the path and the pattern have the same number of hidden parts.
+ hidden_directories_in_path = [
+ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ hidden_directories_in_pattern = [
+ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
+
+
+def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
+ """
+    Get the default patterns from a directory or repository by testing all the supported patterns.
+    The first set of patterns to return a non-empty list of data files is returned.
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ # first check the split patterns like data/{split}-00000-of-00001.parquet
+ for split_pattern in ALL_SPLIT_PATTERNS:
+ pattern = split_pattern.replace("{split}", "*")
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ splits: Set[str] = {
+ string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
+ for p in data_files
+ }
+ if any(not re.match(_split_re, split) for split in splits):
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{splits}'.")
+ sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
+ splits - set(DEFAULT_SPLITS)
+ )
+ return {split: [split_pattern.format(split=split)] for split in sorted_splits}
+ # then check the default patterns based on train/valid/test splits
+ for patterns_dict in ALL_DEFAULT_PATTERNS:
+ non_empty_splits = []
+ for split, patterns in patterns_dict.items():
+ for pattern in patterns:
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ non_empty_splits.append(split)
+ break
+ if non_empty_splits:
+ return {split: patterns_dict[split] for split in non_empty_splits}
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
+ """
+ Get the supported metadata patterns from a directory or repository.
+ """
+ non_empty_patterns = []
+ for pattern in METADATA_PATTERNS:
+ try:
+ metadata_files = pattern_resolver(pattern)
+ if len(metadata_files) > 0:
+ non_empty_patterns.append(pattern)
+ except FileNotFoundError:
+ pass
+ if non_empty_patterns:
+ return non_empty_patterns
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def resolve_pattern(
+ pattern: str,
+ base_path: str,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Resolve the paths and URLs of the data files from the pattern passed by the user.
+
+ You can use patterns to resolve multiple local files. Here are a few examples:
+ - *.csv to match all the CSV files at the first level
+ - **.csv to match all the CSV files at any level
+ - data/* to match all the files inside "data"
+ - data/** to match all the files inside "data" and its subdirectories
+
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
+ other than a forward slash /.
+
+ More generally:
+ - '*' matches any character except a forward-slash (to match just the file or directory name)
+ - '**' matches any character including a forward-slash /
+
+ Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
+ The same applies to special directories that start with a double underscore like "__pycache__".
+    You can still include one if the pattern explicitly mentions it:
+ - to include a hidden file: "*/.hidden.txt" or "*/.*"
+ - to include a hidden directory: ".hidden/*" or ".*/*"
+ - to include a special directory: "__special__/*" or "__*/*"
+
+ Example::
+
+ >>> from datasets.data_files import resolve_pattern
+ >>> base_path = "."
+ >>> resolve_pattern("docs/**/*.py", base_path)
+        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
+
+ Args:
+ pattern (str): Unix pattern or paths or URLs of the data files to resolve.
+ The paths can be absolute or relative to base_path.
+ Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
+ base_path (str): Base path to use when resolving relative paths.
+ allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
+ For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
+ Returns:
+ List[str]: List of paths or URLs to the local or remote files that match the patterns.
+ """
+ if is_relative_path(pattern):
+ pattern = xjoin(base_path, pattern)
+ elif is_local_path(pattern):
+ base_path = os.path.splitdrive(pattern)[0] + os.sep
+ else:
+ base_path = ""
+ pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
+ fs, fs_pattern = url_to_fs(pattern, **storage_options)
+ files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
+ protocol_prefix = protocol + "://" if protocol != "file" else ""
+ glob_kwargs = {}
+ if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
+ # 10 times faster glob with detail=True (ignores costly info like lastCommit)
+ glob_kwargs["expand_info"] = False
+ matched_paths = [
+ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
+ for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
+ if info["type"] == "file"
+ and (xbasename(filepath) not in files_to_ignore)
+ and not _is_inside_unrequested_special_dir(filepath, fs_pattern)
+ and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(filepath, fs_pattern)
+ ] # ignore .ipynb and __pycache__, but keep /../
+ if allowed_extensions is not None:
+ out = [
+ filepath
+ for filepath in matched_paths
+ if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
+ ]
+ if len(out) < len(matched_paths):
+ invalid_matched_files = list(set(matched_paths) - set(out))
+ logger.info(
+ f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
+ )
+ else:
+ out = matched_paths
+ if not out:
+ error_msg = f"Unable to find '{pattern}'"
+ if allowed_extensions is not None:
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
+ raise FileNotFoundError(error_msg)
+ return out
+
+
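+# Illustrative usage sketch added for documentation purposes only (not part of the `datasets` library).
+# The directory layout is hypothetical: it assumes a local working directory with a "data" folder
+# containing CSV files; only `resolve_pattern` itself comes from this module.
+def _example_resolve_pattern():
+ # resolve every CSV file inside "data" and its subdirectories
+ csv_files = resolve_pattern("data/**", base_path=".", allowed_extensions=[".csv"])
+ # hidden files are skipped by default and must be requested explicitly
+ hidden_files = resolve_pattern("*/.*", base_path=".")
+ return csv_files, hidden_files
+
+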
+def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
+ """
+ Get the default data file patterns from a directory by testing all the supported patterns.
+ The first pattern to return a non-empty list of data files is returned.
+
+ Some examples of supported patterns:
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── dataset.csv
+
+ Output:
+
+ {'train': ['**']}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train_0.csv
+ ├── train_1.csv
+ ├── train_2.csv
+ ├── train_3.csv
+ ├── test_0.csv
+ └── test_1.csv
+
+ Output:
+
+ {'train': ['**/train[-._ 0-9]*', '**/*[-._ 0-9]train[-._ 0-9]*', '**/training[-._ 0-9]*', '**/*[-._ 0-9]training[-._ 0-9]*'],
+ 'test': ['**/test[-._ 0-9]*', '**/*[-._ 0-9]test[-._ 0-9]*', '**/testing[-._ 0-9]*', '**/*[-._ 0-9]testing[-._ 0-9]*', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train/
+ │ ├── shard_0.csv
+ │ ├── shard_1.csv
+ │ ├── shard_2.csv
+ │ └── shard_3.csv
+ └── test/
+ ├── shard_0.csv
+ └── shard_1.csv
+
+ Output:
+
+ {'train': ['**/train/**', '**/train[-._ 0-9]*/**', '**/*[-._ 0-9]train/**', '**/*[-._ 0-9]train[-._ 0-9]*/**', ...],
+ 'test': ['**/test/**', '**/test[-._ 0-9]*/**', '**/*[-._ 0-9]test/**', '**/*[-._ 0-9]test[-._ 0-9]*/**', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train-00000-of-00003.csv
+ ├── train-00001-of-00003.csv
+ ├── train-00002-of-00003.csv
+ ├── test-00000-of-00001.csv
+ ├── random-00000-of-00003.csv
+ ├── random-00001-of-00003.csv
+ └── random-00002-of-00003.csv
+
+ Output:
+
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_data_files_patterns(resolver)
+ except FileNotFoundError:
+ raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
+
+
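+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# With a hypothetical repository that contains e.g. "train.csv" and "test.csv" at its root,
+# `get_data_patterns` returns the default train/test patterns shown in the docstring above.
+def _example_get_data_patterns():
+ patterns = get_data_patterns(base_path=".")  # e.g. {'train': [...], 'test': [...]}
+ return patterns
+
+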
+def get_metadata_patterns(
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Get the supported metadata patterns from a local directory.
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_metadata_files_patterns(resolver)
+ except FileNotFoundError:
+ raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
+
+
+def _get_single_origin_metadata(
+ data_file: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[str]:
+ data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
+ fs, *_ = url_to_fs(data_file, **storage_options)
+ if isinstance(fs, HfFileSystem):
+ resolved_path = fs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
+ hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
+ resolved_path = hffs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ info = fs.info(data_file)
+ # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
+ for key in ["ETag", "etag", "mtime"]:
+ if key in info:
+ return (str(info[key]),)
+ return ()
+
+
+def _get_origin_metadata(
+ data_files: List[str],
+ download_config: Optional[DownloadConfig] = None,
+ max_workers: Optional[int] = None,
+) -> Tuple[str]:
+ max_workers = max_workers if max_workers is not None else config.HF_DATASETS_MULTITHREADING_MAX_WORKERS
+ return thread_map(
+ partial(_get_single_origin_metadata, download_config=download_config),
+ data_files,
+ max_workers=max_workers,
+ tqdm_class=hf_tqdm,
+ desc="Resolving data files",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(data_files) <= 16 or None,
+ )
+
+
+class DataFilesList(List[str]):
+ """
+ List of data files (absolute local paths or URLs).
+ It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover DataFilesList has an additional attribute ``origin_metadata``.
+ It can store:
+ - the last modified time of local files
+ - ETag of remote files
+ - commit sha of a dataset repository
+
+ Thanks to this additional attribute, it is possible to hash the list
+ and get a different hash if and only if at least one file changed.
+ This is useful for caching Dataset objects that are obtained from a list of data files.
+ """
+
+ def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
+ super().__init__(data_files)
+ self.origin_metadata = origin_metadata
+
+ def __add__(self, other):
+ return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: List[str],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern in patterns:
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return cls(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
+ pattern = "|".join("\\" + ext for ext in extensions)
+ pattern = re.compile(f".*({pattern})(\\..+)?$")
+ return DataFilesList(
+ [data_file for data_file in self if pattern.match(data_file)],
+ origin_metadata=self.origin_metadata,
+ )
+
+
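+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# The patterns and directory layout are hypothetical; it shows how a DataFilesList is built from
+# patterns and then narrowed down to CSV files.
+def _example_data_files_list():
+ data_files = DataFilesList.from_patterns(["data/*"], base_path=".")
+ # origin_metadata (mtimes, ETags or commit sha) is what makes the list hashable for caching
+ _ = data_files.origin_metadata
+ return data_files.filter_extensions([".csv"])
+
+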
+class DataFilesDict(Dict[str, DataFilesList]):
+ """
+ Dict of split_name -> list of data files (absolute local paths or URLs).
+ It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover each list is a DataFilesList. It is possible to hash the dictionary
+ and get a different hash if and only if at least one file changed.
+ For more info, see ``DataFilesList``.
+
+ This is useful for caching Dataset objects that are obtained from a list of data files.
+
+ Changing the order of the keys of this dictionary also doesn't change its hash.
+ """
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_local_or_remote(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_hf_repo(
+ patterns_for_key,
+ dataset_info=dataset_info,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_patterns(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
+ out = type(self)()
+ for key, data_files_list in self.items():
+ out[key] = data_files_list.filter_extensions(extensions)
+ return out
+
+
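+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# The split names and patterns are hypothetical; it shows how a split -> files mapping is built.
+def _example_data_files_dict():
+ patterns = {"train": ["data/train-*"], "test": ["data/test-*"]}
+ data_files = DataFilesDict.from_patterns(patterns, base_path=".", allowed_extensions=[".csv"])
+ return data_files["train"], data_files["test"]
+
+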
+class DataFilesPatternsList(List[str]):
+ """
+ List of data files patterns (absolute local paths or URLs).
+ For each pattern there should also be a list of allowed extensions
+ to keep, or None to keep all the files for the pattern.
+ """
+
+ def __init__(
+ self,
+ patterns: List[str],
+ allowed_extensions: List[Optional[List[str]]],
+ ):
+ super().__init__(patterns)
+ self.allowed_extensions = allowed_extensions
+
+ def __add__(self, other):
+ return DataFilesPatternsList([*self, *other], self.allowed_extensions + other.allowed_extensions)
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ return cls(patterns, [allowed_extensions] * len(patterns))
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern, allowed_extensions in zip(self, self.allowed_extensions):
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return DataFilesList(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList":
+ return DataFilesPatternsList(
+ self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
+ )
+
+
+class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
+ """
+ Dict of split_name -> list of data files patterns (absolute local paths or URLs).
+ """
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesPatternsList.from_patterns(
+ patterns_for_key,
+ allowed_extensions=allowed_extensions,
+ )
+ if not isinstance(patterns_for_key, DataFilesPatternsList)
+ else patterns_for_key
+ )
+ return out
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = DataFilesDict()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.resolve(base_path, download_config)
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
+ out = type(self)()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.filter_extensions(extensions)
+ return out
diff --git a/venv/lib/python3.10/site-packages/datasets/distributed.py b/venv/lib/python3.10/site-packages/datasets/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..e036fabaf2cf6231ae6a3ca2c443100ccbb0b4d5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/distributed.py
@@ -0,0 +1,39 @@
+from typing import TypeVar
+
+from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
+from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ For map-style datasets:
+
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ For iterable datasets:
+
+ If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most efficient.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`Dataset`] or [`IterableDataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
+ """
+ if isinstance(dataset, Dataset):
+ return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+ else:
+ return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
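+
+
+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It shows the map-style behaviour described above: with 8 rows and 4 nodes, each node gets a
+# contiguous 2-row chunk of the dataset.
+def _example_split_dataset_by_node():
+ ds = Dataset.from_dict({"x": list(range(8))})
+ shard_rank0 = split_dataset_by_node(ds, rank=0, world_size=4)  # rows 0-1
+ shard_rank1 = split_dataset_by_node(ds, rank=1, world_size=4)  # rows 2-3
+ return shard_rank0, shard_rank1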
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__init__.py b/venv/lib/python3.10/site-packages/datasets/features/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ae879a2bb7ffbd7f142c97e4558c8f25a51b6a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/features/__init__.py
@@ -0,0 +1,20 @@
+# ruff: noqa
+
+__all__ = [
+ "Audio",
+ "Array2D",
+ "Array3D",
+ "Array4D",
+ "Array5D",
+ "ClassLabel",
+ "Features",
+ "Sequence",
+ "Value",
+ "Image",
+ "Translation",
+ "TranslationVariableLanguages",
+]
+from .audio import Audio
+from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
+from .image import Image
+from .translation import Translation, TranslationVariableLanguages
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3281d9ec8a172814a3c4f4e4bbe3a3a2fb1ee85
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dbbcbf3d69cf474548d18fcd2b58afb87967cc9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/audio.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5eda5f239161af6b9c5dabb2671878496c09cec
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/features.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2206ac94696ee49c49da9f5bc6fff23b73f9d065
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/image.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c1984d0649bde9a41d8c0bda43251a7f6be1200
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/features/__pycache__/translation.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/features/audio.py b/venv/lib/python3.10/site-packages/datasets/features/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7df47b7a061801196db39eca222c9e4d6f9e599
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/features/audio.py
@@ -0,0 +1,277 @@
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..table import array_cast
+from ..utils.file_utils import xopen, xsplitext
+from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ from .features import FeatureType
+
+
+@dataclass
+class Audio:
+ """Audio [`Feature`] to extract audio data from an audio file.
+
+ Input: The Audio feature accepts as input:
+ - A `str`: Absolute path to the audio file (i.e. random access is allowed).
+ - A `dict` with the keys:
+
+ - `path`: String with the path of the audio file relative to the archive file.
+ - `bytes`: Bytes content of the audio file.
+
+ This is useful for archived files with sequential access.
+
+ - A `dict` with the keys:
+
+ - `path`: String with the path of the audio file relative to the archive file.
+ - `array`: Array containing the audio sample
+ - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
+
+ This is useful for archived files with sequential access.
+
+ Args:
+ sampling_rate (`int`, *optional*):
+ Target sampling rate. If `None`, the native sampling rate is used.
+ mono (`bool`, defaults to `True`):
+ Whether to convert the audio signal to mono by averaging samples across
+ channels.
+ decode (`bool`, defaults to `True`):
+ Whether to decode the audio data. If `False`,
+ returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds[0]["audio"]
+ {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
+ 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+ 'sampling_rate': 16000}
+ ```
+ """
+
+ sampling_rate: Optional[int] = None
+ mono: bool = True
+ decode: bool = True
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+ _type: str = field(default="Audio", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value: Union[str, bytes, dict]) -> dict:
+ """Encode example into a format for Arrow.
+
+ Args:
+ value (`str` or `dict`):
+ Data passed as input to Audio feature.
+
+ Returns:
+ `dict`
+ """
+ try:
+ import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
+ except ImportError as err:
+ raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
+ if isinstance(value, str):
+ return {"bytes": None, "path": value}
+ elif isinstance(value, bytes):
+ return {"bytes": value, "path": None}
+ elif "array" in value:
+ # convert the audio array to wav bytes
+ buffer = BytesIO()
+ sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
+ # we set "bytes": None to not duplicate the data if they're already available locally
+ if value["path"].endswith("pcm"):
+ # "PCM" only has raw audio bytes
+ if value.get("sampling_rate") is None:
+ # To convert raw PCM bytes to WAV bytes, the sampling rate must be known
+ raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
+ if value.get("bytes"):
+ # If the PCM bytes are already provided, use them directly instead of reading the file
+ bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
+ else:
+ bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
+
+ buffer = BytesIO(bytes())
+ sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ else:
+ return {"bytes": None, "path": value.get("path")}
+ elif value.get("bytes") is not None or value.get("path") is not None:
+ # store the audio bytes, and path is used to infer the audio format using the file extension
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
+ else:
+ raise ValueError(
+ f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+ )
+
+ def decode_example(
+ self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
+ ) -> dict:
+ """Decode example audio file into audio data.
+
+ Args:
+ value (`dict`):
+ A dictionary with keys:
+
+ - `path`: String with relative audio file path.
+ - `bytes`: Bytes of the audio file.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode
+ audio files from private repositories on the Hub, you can pass
+ a dictionary repo_id (`str`) -> token (`bool` or `str`)
+
+ Returns:
+ `dict`
+ """
+ if not self.decode:
+ raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
+
+ path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
+ if path is None and file is None:
+ raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
+
+ try:
+ import librosa
+ import soundfile as sf
+ except ImportError as err:
+ raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
+
+ audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
+ if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
+ raise RuntimeError(
+ "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+ elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
+ raise RuntimeError(
+ "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+
+ if file is None:
+ token_per_repo_id = token_per_repo_id or {}
+ source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+ )
+ try:
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
+ token = token_per_repo_id[repo_id]
+ except (ValueError, KeyError):
+ token = None
+
+ download_config = DownloadConfig(token=token)
+ with xopen(path, "rb", download_config=download_config) as f:
+ array, sampling_rate = sf.read(f)
+
+ else:
+ array, sampling_rate = sf.read(file)
+
+ array = array.T
+ if self.mono:
+ array = librosa.to_mono(array)
+ if self.sampling_rate and self.sampling_rate != sampling_rate:
+ array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+ sampling_rate = self.sampling_rate
+
+ return {"path": path, "array": array, "sampling_rate": sampling_rate}
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
+ from .features import Value
+
+ if self.decode:
+ raise ValueError("Cannot flatten a decoded Audio feature.")
+ return {
+ "bytes": Value("binary"),
+ "path": Value("string"),
+ }
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
+ """Cast an Arrow array to the Audio arrow storage type.
+ The Arrow types that can be converted to the Audio pyarrow storage type are:
+
+ - `pa.string()` - it must contain the "path" data
+ - `pa.binary()` - it must contain the audio bytes
+ - `pa.struct({"bytes": pa.binary()})`
+ - `pa.struct({"path": pa.string()})`
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+
+ Args:
+ storage (`Union[pa.StringArray, pa.StructArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
+ """
+ if pa.types.is_string(storage.type):
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_binary(storage.type):
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
+ storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
+ elif pa.types.is_struct(storage.type):
+ if storage.type.get_field_index("bytes") >= 0:
+ bytes_array = storage.field("bytes")
+ else:
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ if storage.type.get_field_index("path") >= 0:
+ path_array = storage.field("path")
+ else:
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+ return array_cast(storage, self.pa_type)
+
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+ """Embed audio files into the Arrow array.
+
+ Args:
+ storage (`pa.StructArray`):
+ PyArrow array to embed.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+
+ @no_op_if_value_is_null
+ def path_to_bytes(path):
+ with xopen(path, "rb") as f:
+ bytes_ = f.read()
+ return bytes_
+
+ bytes_array = pa.array(
+ [
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+ for x in storage.to_pylist()
+ ],
+ type=pa.binary(),
+ )
+ path_array = pa.array(
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+ type=pa.string(),
+ )
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+ return array_cast(storage, self.pa_type)
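+
+
+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It shows how an in-memory waveform is encoded into the {"bytes", "path"} storage format;
+# the waveform is hypothetical and `soundfile` must be installed for the encoding step.
+def _example_audio_encode():
+ audio = Audio(sampling_rate=16_000)
+ waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
+ encoded = audio.encode_example({"array": waveform, "sampling_rate": 16_000})
+ return encoded  # {"bytes": <wav bytes>, "path": None}, matching Audio.pa_type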
diff --git a/venv/lib/python3.10/site-packages/datasets/features/features.py b/venv/lib/python3.10/site-packages/datasets/features/features.py
new file mode 100644
index 0000000000000000000000000000000000000000..893c9f9a1b0b0722471345dc2f3c68f537dfb554
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/features/features.py
@@ -0,0 +1,2202 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""This class handle features definition in datasets and some utilities to display table type."""
+
+import copy
+import json
+import re
+import sys
+from collections.abc import Iterable, Mapping
+from collections.abc import Sequence as SequenceABC
+from dataclasses import InitVar, dataclass, field, fields
+from functools import reduce, wraps
+from operator import mul
+from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
+from typing import Sequence as Sequence_
+
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
+from pandas.api.extensions import ExtensionArray as PandasExtensionArray
+from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
+
+from .. import config
+from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
+from ..table import array_cast
+from ..utils import experimental, logging
+from ..utils.py_utils import asdict, first_non_null_value, zip_dict
+from .audio import Audio
+from .image import Image, encode_pil_image
+from .translation import Translation, TranslationVariableLanguages
+
+
+logger = logging.get_logger(__name__)
+
+
+def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
+ """
+ _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
+ """
+ if pyarrow.types.is_null(arrow_type):
+ return "null"
+ elif pyarrow.types.is_boolean(arrow_type):
+ return "bool"
+ elif pyarrow.types.is_int8(arrow_type):
+ return "int8"
+ elif pyarrow.types.is_int16(arrow_type):
+ return "int16"
+ elif pyarrow.types.is_int32(arrow_type):
+ return "int32"
+ elif pyarrow.types.is_int64(arrow_type):
+ return "int64"
+ elif pyarrow.types.is_uint8(arrow_type):
+ return "uint8"
+ elif pyarrow.types.is_uint16(arrow_type):
+ return "uint16"
+ elif pyarrow.types.is_uint32(arrow_type):
+ return "uint32"
+ elif pyarrow.types.is_uint64(arrow_type):
+ return "uint64"
+ elif pyarrow.types.is_float16(arrow_type):
+ return "float16" # pyarrow dtype is "halffloat"
+ elif pyarrow.types.is_float32(arrow_type):
+ return "float32" # pyarrow dtype is "float"
+ elif pyarrow.types.is_float64(arrow_type):
+ return "float64" # pyarrow dtype is "double"
+ elif pyarrow.types.is_time32(arrow_type):
+ return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
+ elif pyarrow.types.is_time64(arrow_type):
+ return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
+ elif pyarrow.types.is_timestamp(arrow_type):
+ if arrow_type.tz is None:
+ return f"timestamp[{arrow_type.unit}]"
+ elif arrow_type.tz:
+ return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
+ else:
+ raise ValueError(f"Unexpected timestamp object {arrow_type}.")
+ elif pyarrow.types.is_date32(arrow_type):
+ return "date32" # pyarrow dtype is "date32[day]"
+ elif pyarrow.types.is_date64(arrow_type):
+ return "date64" # pyarrow dtype is "date64[ms]"
+ elif pyarrow.types.is_duration(arrow_type):
+ return f"duration[{arrow_type.unit}]"
+ elif pyarrow.types.is_decimal128(arrow_type):
+ return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
+ elif pyarrow.types.is_decimal256(arrow_type):
+ return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
+ elif pyarrow.types.is_binary(arrow_type):
+ return "binary"
+ elif pyarrow.types.is_large_binary(arrow_type):
+ return "large_binary"
+ elif pyarrow.types.is_string(arrow_type):
+ return "string"
+ elif pyarrow.types.is_large_string(arrow_type):
+ return "large_string"
+ else:
+ raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
+
+
+def string_to_arrow(datasets_dtype: str) -> pa.DataType:
+ """
+ string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.
+
+ In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
+
+ This is necessary because the datasets.Value() primitive type is constructed using a string dtype
+
+ Value(dtype=str)
+
+ But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
+ which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
+ purpose of this function.
+ """
+
+ def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
+ msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
+ if examples:
+ examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
+ msg += f"\nValid examples include: {examples}."
+ if urls:
+ urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
+ msg += f"\nFor more insformation, see: {urls}."
+ return msg
+
+ if datasets_dtype in pa.__dict__:
+ return pa.__dict__[datasets_dtype]()
+
+ if (datasets_dtype + "_") in pa.__dict__:
+ return pa.__dict__[datasets_dtype + "_"]()
+
+ timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
+ if timestamp_matches:
+ timestamp_internals = timestamp_matches.group(1)
+ internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
+ if timestamp_internals in ["s", "ms", "us", "ns"]:
+ return pa.timestamp(timestamp_internals)
+ elif internals_matches:
+ return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "timestamp",
+ examples=["timestamp[us]", "timestamp[us, tz=America/New_York"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
+ )
+ )
+
+ duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
+ if duration_matches:
+ duration_internals = duration_matches.group(1)
+ if duration_internals in ["s", "ms", "us", "ns"]:
+ return pa.duration(duration_internals)
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "duration",
+ examples=["duration[s]", "duration[us]"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
+ )
+ )
+
+ time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
+ if time_matches:
+ time_internals_bits = time_matches.group(1)
+ if time_internals_bits == "32":
+ time_internals_unit = time_matches.group(2)
+ if time_internals_unit in ["s", "ms"]:
+ return pa.time32(time_internals_unit)
+ else:
+ raise ValueError(
+ f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
+ )
+ elif time_internals_bits == "64":
+ time_internals_unit = time_matches.group(2)
+ if time_internals_unit in ["us", "ns"]:
+ return pa.time64(time_internals_unit)
+ else:
+ raise ValueError(
+ f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
+ )
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "time",
+ examples=["time32[s]", "time64[us]"],
+ urls=[
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
+ "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
+ ],
+ )
+ )
+
+ decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
+ if decimal_matches:
+ decimal_internals_bits = decimal_matches.group(1)
+ if decimal_internals_bits == "128":
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
+ if decimal_internals_precision_and_scale:
+ precision = decimal_internals_precision_and_scale.group(1)
+ scale = decimal_internals_precision_and_scale.group(2)
+ return pa.decimal128(int(precision), int(scale))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal128",
+ examples=["decimal128(10, 2)", "decimal128(4, -2)"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
+ )
+ )
+ elif decimal_internals_bits == "256":
+ decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
+ if decimal_internals_precision_and_scale:
+ precision = decimal_internals_precision_and_scale.group(1)
+ scale = decimal_internals_precision_and_scale.group(2)
+ return pa.decimal256(int(precision), int(scale))
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal256",
+ examples=["decimal256(30, 2)", "decimal256(38, -4)"],
+ urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
+ )
+ )
+ else:
+ raise ValueError(
+ _dtype_error_msg(
+ datasets_dtype,
+ "decimal",
+ examples=["decimal128(12, 3)", "decimal256(40, 6)"],
+ urls=[
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
+ "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
+ ],
+ )
+ )
+
+ raise ValueError(
+ f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
+ f"Please make sure to use a correct data type, see: "
+ f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
+ )
+
+
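+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It shows the round-trip between datasets string dtypes and pyarrow types described above.
+def _example_string_to_arrow_roundtrip():
+ assert string_to_arrow("int32") == pa.int32()
+ assert string_to_arrow("timestamp[us, tz=UTC]") == pa.timestamp("us", tz="UTC")
+ assert _arrow_to_datasets_dtype(pa.decimal128(10, 2)) == "decimal128(10, 2)"
+
+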
+def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
+ """
+ Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
+ It works recursively.
+
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
+ If the first element needs to be cast, then all the elements of the list will be cast, otherwise they'll stay the same.
+ This trick allows casting objects that contain tokenizer outputs, for example, without iterating over every single token.
+
+ Args:
+ obj: the object (nested struct) to cast.
+ only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
+ Indeed Arrow only supports converting 1-dimensional array values.
+ optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
+ and if it doesn't, not checking the rest of the list elements.
+
+ Returns:
+ casted_obj: the casted object
+ has_changed (bool): True if the object has been changed, False if it is identical
+ """
+
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
+ import PIL.Image
+
+ if isinstance(obj, np.ndarray):
+ if obj.ndim == 0:
+ return obj[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj, False
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj
+ ],
+ True,
+ )
+ elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
+ if obj.ndim == 0:
+ return obj.detach().cpu().numpy()[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj.detach().cpu().numpy(), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj.detach().cpu().numpy()
+ ],
+ True,
+ )
+ elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
+ if obj.ndim == 0:
+ return obj.numpy()[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return obj.numpy(), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in obj.numpy()
+ ],
+ True,
+ )
+ elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
+ if obj.ndim == 0:
+ return np.asarray(obj)[()], True
+ elif not only_1d_for_numpy or obj.ndim == 1:
+ return np.asarray(obj), True
+ else:
+ return (
+ [
+ _cast_to_python_objects(
+ x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for x in np.asarray(obj)
+ ],
+ True,
+ )
+ elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
+ return encode_pil_image(obj), True
+ elif isinstance(obj, pd.Series):
+ return (
+ _cast_to_python_objects(
+ obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0],
+ True,
+ )
+ elif isinstance(obj, pd.DataFrame):
+ return (
+ {
+ key: _cast_to_python_objects(
+ value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for key, value in obj.to_dict("series").items()
+ },
+ True,
+ )
+ elif isinstance(obj, pd.Timestamp):
+ return obj.to_pydatetime(), True
+ elif isinstance(obj, pd.Timedelta):
+ return obj.to_pytimedelta(), True
+ elif isinstance(obj, Mapping):
+ has_changed = not isinstance(obj, dict)
+ output = {}
+ for k, v in obj.items():
+ casted_v, has_changed_v = _cast_to_python_objects(
+ v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )
+ has_changed |= has_changed_v
+ output[k] = casted_v
+ return output if has_changed else obj, has_changed
+ elif hasattr(obj, "__array__"):
+ return (
+ _cast_to_python_objects(
+ obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0],
+ True,
+ )
+ elif isinstance(obj, (list, tuple)):
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt):
+ break
+ casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
+ first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )
+ if has_changed_first_elmt or not optimize_list_casting:
+ return (
+ [
+ _cast_to_python_objects(
+ elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+ for elmt in obj
+ ],
+ True,
+ )
+ else:
+ if isinstance(obj, (list, tuple)):
+ return obj, False
+ else:
+ return list(obj), True
+ else:
+ return obj, False
+ else:
+ return obj, False
+
+
+def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
+ """
+ Cast numpy/pytorch/tensorflow/pandas objects to python lists.
+ It works recursively.
+
+ If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) whether the first element that is not None or empty (if it is a sequence) has to be cast.
+ If the first element needs to be cast, then all the elements of the list will be cast, otherwise they'll stay the same.
+ This trick allows casting objects that contain tokenizer outputs, for example, without iterating over every single token.
+
+ Args:
+ obj: the object (nested struct) to cast
+ only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
+ nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
+ Indeed Arrow only supports converting 1-dimensional array values.
+ optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be cast
+ and if it doesn't, not checking the rest of the list elements.
+
+ Returns:
+ casted_obj: the casted object
+ """
+ return _cast_to_python_objects(
+ obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
+ )[0]
+
+
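+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It shows the recursive casting described above: with `only_1d_for_numpy=True` a 2x2 numpy
+# array becomes a list of 1-d arrays, and a pandas Series becomes a plain python list.
+def _example_cast_to_python_objects():
+ return cast_to_python_objects({"a": np.ones((2, 2)), "b": pd.Series([1, 2])}, only_1d_for_numpy=True)
+
+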
+@dataclass
+class Value:
+ """
+ The `Value` dtypes are as follows:
+
+ - `null`
+ - `bool`
+ - `int8`
+ - `int16`
+ - `int32`
+ - `int64`
+ - `uint8`
+ - `uint16`
+ - `uint32`
+ - `uint64`
+ - `float16`
+ - `float32` (alias float)
+ - `float64` (alias double)
+ - `time32[(s|ms)]`
+ - `time64[(us|ns)]`
+ - `timestamp[(s|ms|us|ns)]`
+ - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
+ - `date32`
+ - `date64`
+ - `duration[(s|ms|us|ns)]`
+ - `decimal128(precision, scale)`
+ - `decimal256(precision, scale)`
+ - `binary`
+ - `large_binary`
+ - `string`
+ - `large_string`
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'stars': Value(dtype='int32')})
+ >>> features
+ {'stars': Value(dtype='int32', id=None)}
+ ```
+ """
+
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Value", init=False, repr=False)
+
+ def __post_init__(self):
+ if self.dtype == "double": # fix inferred type
+ self.dtype = "float64"
+ if self.dtype == "float": # fix inferred type
+ self.dtype = "float32"
+ self.pa_type = string_to_arrow(self.dtype)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value):
+ if pa.types.is_boolean(self.pa_type):
+ return bool(value)
+ elif pa.types.is_integer(self.pa_type):
+ return int(value)
+ elif pa.types.is_floating(self.pa_type):
+ return float(value)
+ elif pa.types.is_string(self.pa_type):
+ return str(value)
+ else:
+ return value
+
+
+class _ArrayXD:
+ def __post_init__(self):
+ self.shape = tuple(self.shape)
+
+ def __call__(self):
+ pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
+ return pa_type
+
+ def encode_example(self, value):
+ return value
+
+
+@dataclass
+class Array2D(_ArrayXD):
+ """Create a two-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array2D", init=False, repr=False)
+
+
+@dataclass
+class Array3D(_ArrayXD):
+ """Create a three-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array3D", init=False, repr=False)
+
+
+@dataclass
+class Array4D(_ArrayXD):
+ """Create a four-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array4D", init=False, repr=False)
+
+
+@dataclass
+class Array5D(_ArrayXD):
+ """Create a five-dimensional array.
+
+ Args:
+ shape (`tuple`):
+ The size of each dimension.
+ dtype (`str`):
+ The value of the data type.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
+ ```
+ """
+
+ shape: tuple
+ dtype: str
+ id: Optional[str] = None
+ # Automatically constructed
+ _type: str = field(default="Array5D", init=False, repr=False)
+
+
+class _ArrayXDExtensionType(pa.ExtensionType):
+ ndims: Optional[int] = None
+
+ def __init__(self, shape: tuple, dtype: str):
+ if self.ndims is None or self.ndims <= 1:
+ raise ValueError("You must instantiate an array type with a value for dim that is > 1")
+ if len(shape) != self.ndims:
+ raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
+ for dim in range(1, self.ndims):
+ if shape[dim] is None:
+ raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
+ self.shape = tuple(shape)
+ self.value_type = dtype
+ self.storage_dtype = self._generate_dtype(self.value_type)
+ pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")
+
+ def __arrow_ext_serialize__(self):
+ return json.dumps((self.shape, self.value_type)).encode()
+
+ @classmethod
+ def __arrow_ext_deserialize__(cls, storage_type, serialized):
+ args = json.loads(serialized)
+ return cls(*args)
+
+ # This was added to pa.ExtensionType in pyarrow >= 13.0.0
+ def __reduce__(self):
+ return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())
+
+ def __hash__(self):
+ return hash((self.__class__, self.shape, self.value_type))
+
+ def __arrow_ext_class__(self):
+ return ArrayExtensionArray
+
+ def _generate_dtype(self, dtype):
+ dtype = string_to_arrow(dtype)
+ for d in reversed(self.shape):
+ dtype = pa.list_(dtype)
+ # Don't specify the size of the list, since fixed length list arrays have issues
+ # being validated after slicing in pyarrow 0.17.1
+ return dtype
+
+ def to_pandas_dtype(self):
+ return PandasArrayExtensionDtype(self.value_type)
+
+
+class Array2DExtensionType(_ArrayXDExtensionType):
+ ndims = 2
+
+
+class Array3DExtensionType(_ArrayXDExtensionType):
+ ndims = 3
+
+
+class Array4DExtensionType(_ArrayXDExtensionType):
+ ndims = 4
+
+
+class Array5DExtensionType(_ArrayXDExtensionType):
+ ndims = 5
+
+
+# Register the extension types for deserialization
+pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
+pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
+pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
+pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
+
+
+def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
+ """
+ When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
+ This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.
+
+ # zero copy is available for all primitive types except booleans and temporal types (date, time, timestamp or duration)
+ # primitive types are types for which the physical representation in arrow and in numpy is the same
+ # https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
+ # see https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
+ # and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
+ """
+
+ def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType:
+ if pa.types.is_list(pa_type):
+ return _unnest_pa_type(pa_type.value_type)
+ return pa_type
+
+ if unnest:
+ pa_type = _unnest_pa_type(pa_type)
+ return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
+
+
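+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It shows the rule described above: primitive numeric types can be viewed as numpy without a
+# copy, while booleans and temporal types cannot.
+def _example_is_zero_copy_only():
+ assert _is_zero_copy_only(pa.int64()) is True
+ assert _is_zero_copy_only(pa.bool_()) is False
+ assert _is_zero_copy_only(pa.list_(pa.float32()), unnest=True) is True
+
+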
+class ArrayExtensionArray(pa.ExtensionArray):
+ def __array__(self):
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
+ return self.to_numpy(zero_copy_only=zero_copy_only)
+
+ def __getitem__(self, i):
+ return self.storage[i]
+
+ def to_numpy(self, zero_copy_only=True):
+ storage: pa.ListArray = self.storage
+ null_mask = storage.is_null().to_numpy(zero_copy_only=False)
+
+ if self.type.shape[0] is not None:
+ size = 1
+ null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))
+
+ for i in range(self.type.ndims):
+ size *= self.type.shape[i]
+ storage = storage.flatten()
+ numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
+ numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)
+
+ if len(null_indices):
+ numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
+
+ else:
+ shape = self.type.shape
+ ndims = self.type.ndims
+ arrays = []
+ first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
+ for i, is_null in enumerate(null_mask):
+ if is_null:
+ arrays.append(np.nan)
+ else:
+ storage_el = storage[i : i + 1]
+ first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
+ # flatten storage
+ for _ in range(ndims):
+ storage_el = storage_el.flatten()
+
+ numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
+ arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))
+
+ if len(np.unique(np.diff(first_dim_offsets))) > 1:
+ # ragged
+ numpy_arr = np.empty(len(arrays), dtype=object)
+ numpy_arr[:] = arrays
+ else:
+ numpy_arr = np.array(arrays)
+
+ return numpy_arr
+
+ def to_pylist(self):
+ zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
+ numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
+ if self.type.shape[0] is None and numpy_arr.dtype == object:
+ return [arr.tolist() for arr in numpy_arr.tolist()]
+ else:
+ return numpy_arr.tolist()
+
+
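+# Illustrative sketch added for documentation purposes only (not part of the `datasets` library).
+# It wraps plain list-of-list storage in the Array2D extension type registered above and reads
+# it back as a numpy array of shape (1, 2, 2).
+def _example_array_extension_array():
+ ext_type = Array2DExtensionType(shape=(2, 2), dtype="int64")
+ storage = pa.array([[[1, 2], [3, 4]]], type=ext_type.storage_dtype)
+ arr = pa.ExtensionArray.from_storage(ext_type, storage)
+ return arr.to_numpy()
+
+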
+class PandasArrayExtensionDtype(PandasExtensionDtype):
+ _metadata = "value_type"
+
+ def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
+ self._value_type = value_type
+
+ def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
+ if isinstance(array, pa.ChunkedArray):
+ array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
+ zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
+ numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
+ return PandasArrayExtensionArray(numpy_arr)
+
+ @classmethod
+ def construct_array_type(cls):
+ return PandasArrayExtensionArray
+
+ @property
+ def type(self) -> type:
+ return np.ndarray
+
+ @property
+ def kind(self) -> str:
+ return "O"
+
+ @property
+ def name(self) -> str:
+ return f"array[{self.value_type}]"
+
+ @property
+ def value_type(self) -> np.dtype:
+ return self._value_type
+
+
+class PandasArrayExtensionArray(PandasExtensionArray):
+ def __init__(self, data: np.ndarray, copy: bool = False):
+ self._data = data if not copy else np.array(data)
+ self._dtype = PandasArrayExtensionDtype(data.dtype)
+
+ def __array__(self, dtype=None):
+ """
+ Convert to NumPy Array.
+ Note that Pandas expects a 1D array when dtype is set to object.
+ But for other dtypes, the returned shape is the same as the one of ``data``.
+
+ More info about pandas 1D requirement for PandasExtensionArray here:
+ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
+
+ """
+ if dtype == object:
+ out = np.empty(len(self._data), dtype=object)
+ for i in range(len(self._data)):
+ out[i] = self._data[i]
+ return out
+ if dtype is None:
+ return self._data
+ else:
+ return self._data.astype(dtype)
+
+ def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
+ return PandasArrayExtensionArray(self._data, copy=True)
+
+ @classmethod
+ def _from_sequence(
+ cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
+ ) -> "PandasArrayExtensionArray":
+ if len(scalars) > 1 and all(
+ isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
+ ):
+ data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
+ else:
+ data = np.empty(len(scalars), dtype=object)
+ data[:] = scalars
+ return cls(data, copy=copy)
+
+ @classmethod
+ def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
+ if len(to_concat) > 1 and all(
+ va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
+ for va in to_concat
+ ):
+ data = np.vstack([va._data for va in to_concat])
+ else:
+ data = np.empty(len(to_concat), dtype=object)
+ data[:] = [va._data for va in to_concat]
+ return cls(data, copy=False)
+
+ @property
+ def dtype(self) -> PandasArrayExtensionDtype:
+ return self._dtype
+
+ @property
+ def nbytes(self) -> int:
+ return self._data.nbytes
+
+ def isna(self) -> np.ndarray:
+ return np.array([pd.isna(arr).any() for arr in self._data])
+
+ def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
+ raise NotImplementedError()
+
+ def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
+ if isinstance(item, int):
+ return self._data[item]
+ return PandasArrayExtensionArray(self._data[item], copy=False)
+
+ def take(
+ self, indices: Sequence_[int], allow_fill: bool = False, fill_value: Optional[Any] = None
+ ) -> "PandasArrayExtensionArray":
+ indices: np.ndarray = np.asarray(indices, dtype=int)
+ if allow_fill:
+ fill_value = (
+ self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
+ )
+ mask = indices == -1
+ if (indices < -1).any():
+ raise ValueError("Invalid value in `indices`, must all be >= -1 when `allow_fill` is True")
+ elif len(self) > 0:
+ pass
+ elif not np.all(mask):
+ raise IndexError("Invalid take for empty PandasArrayExtensionArray: indices must all be -1.")
+ else:
+ data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
+ return PandasArrayExtensionArray(data, copy=False)
+ took = self._data.take(indices, axis=0)
+ if allow_fill and mask.any():
+ took[mask] = [fill_value] * np.sum(mask)
+ return PandasArrayExtensionArray(took, copy=False)
+
+ def __len__(self) -> int:
+ return len(self._data)
+
+ def __eq__(self, other) -> np.ndarray:
+ if not isinstance(other, PandasArrayExtensionArray):
+ raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
+ return (self._data == other._data).all()
+
+
+def pandas_types_mapper(dtype):
+ if isinstance(dtype, _ArrayXDExtensionType):
+ return PandasArrayExtensionDtype(dtype.value_type)
+
+
+@dataclass
+class ClassLabel:
+ """Feature type for integer class labels.
+
+ There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
+
+ * `num_classes`: Create 0 to (num_classes-1) labels.
+ * `names`: List of label strings.
+ * `names_file`: File containing the list of labels.
+
+ Under the hood the labels are stored as integers.
+ You can use negative integers to represent unknown/missing labels.
+
+ Args:
+ num_classes (`int`, *optional*):
+ Number of classes. All labels must be < `num_classes`.
+ names (`list` of `str`, *optional*):
+ String names for the integer classes.
+ The order in which the names are provided is kept.
+ names_file (`str`, *optional*):
+ Path to a file with names for the integer classes, one per line.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
+ >>> features
+ {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
+ ```
+ """
+
+ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ names: List[str] = None
+ names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "int64"
+ pa_type: ClassVar[Any] = pa.int64()
+ _str2int: ClassVar[Dict[str, int]] = None
+ _int2str: ClassVar[List[str]] = None
+ _type: str = field(default="ClassLabel", init=False, repr=False)
+
+ def __post_init__(self, num_classes, names_file):
+ self.num_classes = num_classes
+ self.names_file = names_file
+ if self.names_file is not None and self.names is not None:
+ raise ValueError("Please provide either names or names_file but not both.")
+ # Set self.names
+ if self.names is None:
+ if self.names_file is not None:
+ self.names = self._load_names_from_file(self.names_file)
+ elif self.num_classes is not None:
+ self.names = [str(i) for i in range(self.num_classes)]
+ else:
+ raise ValueError("Please provide either num_classes, names or names_file.")
+ elif not isinstance(self.names, SequenceABC):
+ raise TypeError(f"Please provide names as a list, is {type(self.names)}")
+ # Set self.num_classes
+ if self.num_classes is None:
+ self.num_classes = len(self.names)
+ elif self.num_classes != len(self.names):
+ raise ValueError(
+ "ClassLabel number of names does not match the defined num_classes. "
+ f"Got {len(self.names)} names vs {self.num_classes} num_classes"
+ )
+ # Prepare mappings
+ self._int2str = [str(name) for name in self.names]
+ self._str2int = {name: i for i, name in enumerate(self._int2str)}
+ if len(self._int2str) != len(self._str2int):
+ raise ValueError("Some label names are duplicated. Each label name should be unique.")
+
+ def __call__(self):
+ return self.pa_type
+
+ def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
+ """Conversion class name `string` => `integer`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].str2int('neg')
+ 0
+ ```
+ """
+ if not isinstance(values, str) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be a string or an Iterable (e.g. a list, a numpy array, or a pytorch or tensorflow tensor)"
+ )
+ return_list = True
+ if isinstance(values, str):
+ values = [values]
+ return_list = False
+
+ output = [self._strval2int(value) for value in values]
+ return output if return_list else output[0]
+
+ def _strval2int(self, value: str) -> int:
+ failed_parse = False
+ value = str(value)
+ # first attempt - raw string value
+ int_value = self._str2int.get(value)
+ if int_value is None:
+ # second attempt - strip whitespace
+ int_value = self._str2int.get(value.strip())
+ if int_value is None:
+ # third attempt - convert str to int
+ try:
+ int_value = int(value)
+ except ValueError:
+ failed_parse = True
+ else:
+ if int_value < -1 or int_value >= self.num_classes:
+ failed_parse = True
+ if failed_parse:
+ raise ValueError(f"Invalid string class label {value}")
+ return int_value
+
+ def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
+ """Conversion `integer` => class name `string`.
+
+ Regarding unknown/missing labels: passing negative integers raises `ValueError`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].int2str(0)
+ 'neg'
+ ```
+ """
+ if not isinstance(values, int) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be an integer or an Iterable (e.g. a list, a numpy array, or a pytorch or tensorflow tensor)"
+ )
+ return_list = True
+ if isinstance(values, int):
+ values = [values]
+ return_list = False
+
+ for v in values:
+ if not 0 <= v < self.num_classes:
+ raise ValueError(f"Invalid integer class label {v:d}")
+
+ output = [self._int2str[int(v)] for v in values]
+ return output if return_list else output[0]
+
+ def encode_example(self, example_data):
+ if self.num_classes is None:
+ raise ValueError(
+ "Trying to use ClassLabel feature with an undefined number of classes. "
+ "Please set ClassLabel.names or num_classes."
+ )
+
+ # If a string is given, convert to associated integer
+ if isinstance(example_data, str):
+ example_data = self.str2int(example_data)
+
+ # Allowing -1 to mean no label.
+ if not -1 <= example_data < self.num_classes:
+ raise ValueError(f"Class label {example_data:d} is outside the valid range [-1, {self.num_classes}) for num_classes={self.num_classes}")
+ return example_data
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
+ """Cast an Arrow array to the `ClassLabel` arrow storage type.
+ The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
+
+ - `pa.string()`
+ - integer types (e.g. `pa.int64()`)
+
+ Args:
+ storage (`Union[pa.StringArray, pa.IntegerArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
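+
+ Example (illustrative)::
+
+ >>> import pyarrow as pa
+ >>> labels = ClassLabel(names=["neg", "pos"])
+ >>> labels.cast_storage(pa.array(["neg", "pos", None])).to_pylist()
+ [0, 1, None]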
+ """
+ if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
+ min_max = pc.min_max(storage).as_py()
+ if min_max["max"] is not None and min_max["max"] >= self.num_classes:
+ raise ValueError(
+ f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
+ )
+ elif isinstance(storage, pa.StringArray):
+ storage = pa.array(
+ [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
+ )
+ return array_cast(storage, self.pa_type)
+
+ @staticmethod
+ def _load_names_from_file(names_filepath):
+ with open(names_filepath, encoding="utf-8") as f:
+ return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
+
+
+@dataclass
+class Sequence:
+ """Construct a list of features from a single type or a dict of types.
+ Mostly here for compatibility with tfds.
+
+ Args:
+ feature:
+ A list of features of a single type or a dictionary of types.
+ length (`int`, defaults to `-1`):
+ Length of the sequence. The default `-1` means the length is not fixed.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features, Sequence, Value, ClassLabel
+ >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
+ >>> features
+ {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
+ ```
+ """
+
+ feature: Any
+ length: int = -1
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "list"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Sequence", init=False, repr=False)
+
+
+FeatureType = Union[
+ dict,
+ list,
+ tuple,
+ Value,
+ ClassLabel,
+ Translation,
+ TranslationVariableLanguages,
+ Sequence,
+ Array2D,
+ Array3D,
+ Array4D,
+ Array5D,
+ Audio,
+ Image,
+]
+
+
+def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
+ """
+ Check if the object is not None.
+ If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
+ """
+ if obj is None:
+ return False
+ elif isinstance(obj, (list, tuple)) and (schema is None or isinstance(schema, (list, tuple, Sequence))):
+ if len(obj) > 0:
+ if schema is None:
+ pass
+ elif isinstance(schema, (list, tuple)):
+ schema = schema[0]
+ else:
+ schema = schema.feature
+ return _check_non_null_non_empty_recursive(obj[0], schema)
+ else:
+ return False
+ else:
+ return True
+
+
+def get_nested_type(schema: FeatureType) -> pa.DataType:
+ """
+ get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
+ generate_from_arrow_type().
+
+ It performs double-duty as the implementation of Features.type and handles the conversion of
+ datasets.Feature->pa.struct
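+
+ Example (illustrative)::
+
+ >>> get_nested_type({"a": Value("int32"), "b": [Value("string")]})
+ StructType(struct<a: int32, b: list<item: string>>)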
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, Features):
+ return pa.struct(
+ {key: get_nested_type(schema[key]) for key in schema}
+ ) # Features is subclass of dict, and dict order is deterministic since Python 3.6
+ elif isinstance(schema, dict):
+ return pa.struct(
+ {key: get_nested_type(schema[key]) for key in schema}
+ ) # however don't sort on struct types since the order matters
+ elif isinstance(schema, (list, tuple)):
+ if len(schema) != 1:
+ raise ValueError("When defining list feature, you should just provide one example of the inner type")
+ value_type = get_nested_type(schema[0])
+ return pa.list_(value_type)
+ elif isinstance(schema, Sequence):
+ value_type = get_nested_type(schema.feature)
+ # We allow converting a list of dicts into a dict of lists, for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
+ return pa.list_(value_type, schema.length)
+
+ # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
+ return schema()
+
+
+def encode_nested_example(schema, obj, level=0):
+ """Encode a nested example.
+ This is used since some features (in particular ClassLabel) have some logic during encoding.
+
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
+ If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
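+
+ Example (illustrative)::
+
+ >>> encode_nested_example(ClassLabel(names=["neg", "pos"]), "pos")
+ 1
+ >>> encode_nested_example([ClassLabel(names=["neg", "pos"])], ["neg", "pos"])
+ [0, 1]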
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, dict):
+ if level == 0 and obj is None:
+ raise ValueError("Got None but expected a dictionary instead")
+ return (
+ {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
+ if obj is not None
+ else None
+ )
+
+ elif isinstance(schema, (list, tuple)):
+ sub_schema = schema[0]
+ if obj is None:
+ return None
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
+ break
+ if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
+ return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
+ return list(obj)
+ elif isinstance(schema, Sequence):
+ if obj is None:
+ return None
+ # We allow converting a list of dicts into a dict of lists, for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ # dict of list to fill
+ list_dict = {}
+ if isinstance(obj, (list, tuple)):
+ # obj is a list of dict
+ for k in schema.feature:
+ list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
+ return list_dict
+ else:
+ # obj is a single dict
+ for k in schema.feature:
+ list_dict[k] = (
+ [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
+ if k in obj
+ else None
+ )
+ return list_dict
+ # schema.feature is not a dict
+ if isinstance(obj, str): # don't interpret a string as a list
+ raise ValueError(f"Got a string but expected a list instead: '{obj}'")
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
+ break
+ # be careful when comparing tensors here
+ if (
+ not isinstance(first_elmt, list)
+ or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
+ ):
+ return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
+ return list(obj)
+ # Object with special encoding:
+ # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
+ elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
+ return schema.encode_example(obj) if obj is not None else None
+ # Other objects should be directly convertible to a native Arrow type (like Translation)
+ return obj
+
+
+def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode a nested example.
+ This is used since some features (in particular Audio and Image) have some logic during decoding.
+
+ To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
+ If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(schema, dict):
+ return (
+ {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
+ if obj is not None
+ else None
+ )
+ elif isinstance(schema, (list, tuple)):
+ sub_schema = schema[0]
+ if obj is None:
+ return None
+ else:
+ if len(obj) > 0:
+ for first_elmt in obj:
+ if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
+ break
+ if decode_nested_example(sub_schema, first_elmt) != first_elmt:
+ return [decode_nested_example(sub_schema, o) for o in obj]
+ return list(obj)
+ elif isinstance(schema, Sequence):
+ # We allow converting a list of dicts into a dict of lists, for compatibility with tfds
+ if isinstance(schema.feature, dict):
+ return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
+ else:
+ return decode_nested_example([schema.feature], obj)
+ # Object with special decoding:
+ elif isinstance(schema, (Audio, Image)):
+ # we pass the token to read and decode files from private repositories in streaming mode
+ if obj is not None and schema.decode:
+ return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
+ return obj
+
+
+_FEATURE_TYPES: Dict[str, FeatureType] = {
+ Value.__name__: Value,
+ ClassLabel.__name__: ClassLabel,
+ Translation.__name__: Translation,
+ TranslationVariableLanguages.__name__: TranslationVariableLanguages,
+ Sequence.__name__: Sequence,
+ Array2D.__name__: Array2D,
+ Array3D.__name__: Array3D,
+ Array4D.__name__: Array4D,
+ Array5D.__name__: Array5D,
+ Audio.__name__: Audio,
+ Image.__name__: Image,
+}
+
+
+@experimental
+def register_feature(
+ feature_cls: type,
+ feature_type: str,
+):
+ """
+ Register a Feature class under a given feature type name so that it can be looked up
+ when features are regenerated from a serialized dict (see `generate_from_dict`).
+ This function must be called with a Feature class, not an instance.
+ """
+ if feature_type in _FEATURE_TYPES:
+ logger.warning(
+ f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})"
+ )
+ _FEATURE_TYPES[feature_type] = feature_cls
+
+
+def generate_from_dict(obj: Any):
+ """Regenerate the nested feature object from a deserialized dict.
+ We use the '_type' fields to get the dataclass name to load.
+
+ generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
+ a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
+ :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
+ mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
+ that :class:`Value` automatically performs.
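+
+ Example (illustrative)::
+
+ >>> generate_from_dict({"dtype": "string", "id": None, "_type": "Value"})
+ Value(dtype='string', id=None)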
+ """
+ # Nested structures: we allow dict, list/tuples, sequences
+ if isinstance(obj, list):
+ return [generate_from_dict(value) for value in obj]
+ # Otherwise we have a dict or a dataclass
+ if "_type" not in obj or isinstance(obj["_type"], dict):
+ return {key: generate_from_dict(value) for key, value in obj.items()}
+ obj = dict(obj)
+ _type = obj.pop("_type")
+ class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None)
+
+ if class_type is None:
+ raise ValueError(f"Feature type '{_type}' not found. Available feature types: {list(_FEATURE_TYPES.keys())}")
+
+ if class_type == Sequence:
+ return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))
+
+ field_names = {f.name for f in fields(class_type)}
+ return class_type(**{k: v for k, v in obj.items() if k in field_names})
+
+
+def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
+ """
+ generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
+ a single field.
+
+ This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().
+
+ This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
+ full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
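+
+ Example (illustrative sketch, assuming ``pyarrow`` is imported as ``pa``)::
+
+ >>> generate_from_arrow_type(pa.string())
+ Value(dtype='string', id=None)
+ >>> generate_from_arrow_type(pa.list_(pa.int32()))
+ Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None)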
+ """
+ if isinstance(pa_type, pa.StructType):
+ return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
+ elif isinstance(pa_type, pa.FixedSizeListType):
+ return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
+ elif isinstance(pa_type, pa.ListType):
+ feature = generate_from_arrow_type(pa_type.value_type)
+ if isinstance(feature, (dict, tuple, list)):
+ return [feature]
+ return Sequence(feature=feature)
+ elif isinstance(pa_type, _ArrayXDExtensionType):
+ array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
+ return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
+ elif isinstance(pa_type, pa.DictionaryType):
+ raise NotImplementedError # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
+ elif isinstance(pa_type, pa.DataType):
+ return Value(dtype=_arrow_to_datasets_dtype(pa_type))
+ else:
+ raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
+
+
+def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
+ """Build a PyArrow ListArray from a multidimensional NumPy array"""
+ arr = np.array(arr)
+ values = pa.array(arr.flatten(), type=type)
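+ # Wrap the flat values back into nested lists one dimension at a time, starting from the
+ # innermost dimension: each pass builds offsets spaced by the size of that dimension.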
+ for i in range(arr.ndim - 1):
+ n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
+ step_offsets = arr.shape[arr.ndim - i - 1]
+ offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
+ values = pa.ListArray.from_arrays(offsets, values)
+ return values
+
+
+def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
+ null_mask = np.array([arr is None for arr in l_arr])
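+ # positions (in the compacted, null-free array) where a null offset has to be re-inserted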
+ null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
+ l_arr = [arr for arr in l_arr if arr is not None]
+ offsets = np.cumsum(
+ [0] + [len(arr) for arr in l_arr], dtype=object
+ ) # convert to dtype object to allow None insertion
+ offsets = np.insert(offsets, null_indices, None)
+ offsets = pa.array(offsets, type=pa.int32())
+ values = pa.concat_arrays(l_arr)
+ return pa.ListArray.from_arrays(offsets, values)
+
+
+def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
+ """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
+ if len(l_arr) > 0:
+ return list_of_pa_arrays_to_pyarrow_listarray(
+ [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
+ )
+ else:
+ return pa.array([], type=type)
+
+
+def contains_any_np_array(data: Any):
+ """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.
+
+ Args:
+ data (Any): Data.
+
+ Returns:
+ bool
+ """
+ if isinstance(data, np.ndarray):
+ return True
+ elif isinstance(data, list):
+ return contains_any_np_array(first_non_null_value(data)[1])
+ else:
+ return False
+
+
+def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
+ """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.
+
+ Args:
+ data (Union[np.ndarray, List]): Data.
+ type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.
+
+ Returns:
+ pa.ListArray
+ """
+ if isinstance(data, np.ndarray):
+ return numpy_to_pyarrow_listarray(data, type=type)
+ elif isinstance(data, list):
+ return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data])
+
+
+def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
+ """Convert to PyArrow ListArray.
+
+ Args:
+ data (Any): Sequence, iterable, np.ndarray or pd.Series.
+ pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.
+
+ Returns:
+ pyarrow.Array
+ """
+ if contains_any_np_array(data):
+ return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
+ else:
+ return pa.array(data, pa_type.storage_dtype)
+
+
+def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
+ """Visit a (possibly nested) feature.
+
+ Args:
+ feature (FeatureType): the feature type to visit
+ func (Callable[[FeatureType], Optional[FeatureType]]): function applied to each nested feature; returning ``None`` keeps the original feature
+ Returns:
+ visited feature (FeatureType)
+ """
+ if isinstance(feature, dict):
+ out = func({k: _visit(f, func) for k, f in feature.items()})
+ elif isinstance(feature, (list, tuple)):
+ out = func([_visit(feature[0], func)])
+ elif isinstance(feature, Sequence):
+ out = func(Sequence(_visit(feature.feature, func), length=feature.length))
+ else:
+ out = func(feature)
+ return feature if out is None else out
+
+
+def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
+ """Check if a (possibly nested) feature requires decoding.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
+ of the `decode` attribute of the decodable feature types.
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_decoding(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_decoding(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_decoding(feature.feature)
+ else:
+ return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
+
+
+def require_storage_cast(feature: FeatureType) -> bool:
+ """Check if a (possibly nested) feature requires storage casting.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_storage_cast(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_storage_cast(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_storage_cast(feature.feature)
+ else:
+ return hasattr(feature, "cast_storage")
+
+
+def require_storage_embed(feature: FeatureType) -> bool:
+ """Check if a (possibly nested) feature requires embedding data into storage.
+
+ Args:
+ feature (FeatureType): the feature type to be checked
+ Returns:
+ :obj:`bool`
+ """
+ if isinstance(feature, dict):
+ return any(require_storage_embed(f) for f in feature.values())
+ elif isinstance(feature, (list, tuple)):
+ return require_storage_embed(feature[0])
+ elif isinstance(feature, Sequence):
+ return require_storage_embed(feature.feature)
+ else:
+ return hasattr(feature, "embed_storage")
+
+
+def keep_features_dicts_synced(func):
+ """
+ Wrapper to keep the secondary dictionary of a :class:`datasets.Features` object (the one tracking
+ which columns require decoding) in sync with the main dictionary.
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Features" = args[0]
+ args = args[1:]
+ else:
+ self: "Features" = kwargs.pop("self")
+ out = func(self, *args, **kwargs)
+ assert hasattr(self, "_column_requires_decoding")
+ self._column_requires_decoding = {col: require_decoding(feature) for col, feature in self.items()}
+ return out
+
+ wrapper._decorator_name_ = "_keep_dicts_synced"
+ return wrapper
+
+
+class Features(dict):
+ """A special dictionary that defines the internal structure of a dataset.
+
+ Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
+ and values are the type of that column.
+
+ `FieldType` can be one of the following:
+ - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
+ - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
+ associated to them and will be stored as integers in the dataset.
+ - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
+ features. It's possible to have nested fields of nested fields in an arbitrary manner.
+ - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
+ `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
+ type hosted in this list.
+
+
+
+ A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
+ lists. This behavior is implemented as a compatibility layer with the TensorFlow Datasets library but may be
+ unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
+ [`~datasets.Sequence`].
+
+
+
+ - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
+ - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
+ to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
+ - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
+ or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
+ - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
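+
+ Example (illustrative; the column names are made up):
+
+ ```py
+ >>> from datasets import Features, Value, ClassLabel
+ >>> features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
+ >>> features.encode_example({"text": "great movie", "label": "pos"})
+ {'text': 'great movie', 'label': 1}
+ ```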
+ """
+
+ def __init__(*args, **kwargs):
+ # self not in the signature to allow passing self as a kwarg
+ if not args:
+ raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
+ self, *args = args
+ super(Features, self).__init__(*args, **kwargs)
+ self._column_requires_decoding: Dict[str, bool] = {
+ col: require_decoding(feature) for col, feature in self.items()
+ }
+
+ __setitem__ = keep_features_dicts_synced(dict.__setitem__)
+ __delitem__ = keep_features_dicts_synced(dict.__delitem__)
+ update = keep_features_dicts_synced(dict.update)
+ setdefault = keep_features_dicts_synced(dict.setdefault)
+ pop = keep_features_dicts_synced(dict.pop)
+ popitem = keep_features_dicts_synced(dict.popitem)
+ clear = keep_features_dicts_synced(dict.clear)
+
+ def __reduce__(self):
+ return Features, (dict(self),)
+
+ @property
+ def type(self):
+ """
+ Features field types.
+
+ Returns:
+ :obj:`pyarrow.DataType`
+ """
+ return get_nested_type(self)
+
+ @property
+ def arrow_schema(self):
+ """
+ Features schema.
+
+ Returns:
+ :obj:`pyarrow.Schema`
+ """
+ hf_metadata = {"info": {"features": self.to_dict()}}
+ return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
+
+ @classmethod
+ def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
+ """
+ Construct [`Features`] from Arrow Schema.
+ It also checks the schema metadata for Hugging Face Datasets features.
+ Non-nullable fields are not supported and are set to nullable.
+
+ Args:
+ pa_schema (`pyarrow.Schema`):
+ Arrow Schema.
+
+ Returns:
+ [`Features`]
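+
+ Example (illustrative sketch, assuming ``pyarrow`` is imported as ``pa``):
+
+ ```py
+ >>> Features.from_arrow_schema(pa.schema({"text": pa.string(), "tokens": pa.list_(pa.string())}))
+ {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}
+ ```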
+ """
+ # try to load features from the arrow schema metadata
+ metadata_features = Features()
+ if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
+ metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
+ if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
+ metadata_features = Features.from_dict(metadata["info"]["features"])
+ metadata_features_schema = metadata_features.arrow_schema
+ obj = {
+ field.name: (
+ metadata_features[field.name]
+ if field.name in metadata_features and metadata_features_schema.field(field.name) == field
+ else generate_from_arrow_type(field.type)
+ )
+ for field in pa_schema
+ }
+ return cls(**obj)
+
+ @classmethod
+ def from_dict(cls, dic) -> "Features":
+ """
+ Construct [`Features`] from dict.
+
+ Regenerate the nested feature object from a deserialized dict.
+ We use the `_type` key to infer the dataclass name of the feature `FieldType`.
+
+ It allows for a convenient constructor syntax
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
+ a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
+ [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
+ any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
+ dtypes that [`Value`] automatically performs.
+
+ Args:
+ dic (`dict[str, Any]`):
+ Python dictionary.
+
+ Returns:
+ `Features`
+
+ Example::
+ >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
+ {'_type': Value(dtype='string', id=None)}
+ """
+ obj = generate_from_dict(dic)
+ return cls(**obj)
+
+ def to_dict(self):
+ return asdict(self)
+
+ def _to_yaml_list(self) -> list:
+ # we compute the YAML list from the dict representation that is used for JSON dump
+ yaml_data = self.to_dict()
+
+ def simplify(feature: dict) -> dict:
+ if not isinstance(feature, dict):
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
+
+ #
+ # sequence: -> sequence: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
+ feature["sequence"] = feature["sequence"]["dtype"]
+
+ #
+ # sequence: -> sequence:
+ # struct: -> - name: foo
+ # - name: foo -> dtype: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
+ feature["sequence"] = feature["sequence"]["struct"]
+
+ #
+ # list: -> list: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
+ feature["list"] = feature["list"]["dtype"]
+
+ #
+ # list: -> list:
+ # struct: -> - name: foo
+ # - name: foo -> dtype: int32
+ # dtype: int32 ->
+ #
+ if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
+ feature["list"] = feature["list"]["struct"]
+
+ #
+ # class_label: -> class_label:
+ # names: -> names:
+ # - negative -> '0': negative
+ # - positive -> '1': positive
+ #
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
+ # server-side requirement: keys must be strings
+ feature["class_label"]["names"] = {
+ str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
+ }
+ return feature
+
+ def to_yaml_inner(obj: Union[dict, list]) -> dict:
+ if isinstance(obj, dict):
+ _type = obj.pop("_type", None)
+ if _type == "Sequence":
+ _feature = obj.pop("feature")
+ return simplify({"sequence": to_yaml_inner(_feature), **obj})
+ elif _type == "Value":
+ return obj
+ elif _type and not obj:
+ return {"dtype": camelcase_to_snakecase(_type)}
+ elif _type:
+ return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
+ else:
+ return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
+ elif isinstance(obj, list):
+ return simplify({"list": simplify(to_yaml_inner(obj[0]))})
+ elif isinstance(obj, tuple):
+ return to_yaml_inner(list(obj))
+ else:
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
+
+ def to_yaml_types(obj: dict) -> dict:
+ if isinstance(obj, dict):
+ return {k: to_yaml_types(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [to_yaml_types(v) for v in obj]
+ elif isinstance(obj, tuple):
+ return to_yaml_types(list(obj))
+ else:
+ return obj
+
+ return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
+
+ @classmethod
+ def _from_yaml_list(cls, yaml_data: list) -> "Features":
+ yaml_data = copy.deepcopy(yaml_data)
+
+ # we convert the list obtained from YAML data into the dict representation that is used for JSON dump
+
+ def unsimplify(feature: dict) -> dict:
+ if not isinstance(feature, dict):
+ raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
+ #
+ # sequence: int32 -> sequence:
+ # -> dtype: int32
+ #
+ if isinstance(feature.get("sequence"), str):
+ feature["sequence"] = {"dtype": feature["sequence"]}
+ #
+ # list: int32 -> list:
+ # -> dtype: int32
+ #
+ if isinstance(feature.get("list"), str):
+ feature["list"] = {"dtype": feature["list"]}
+
+ #
+ # class_label: -> class_label:
+ # names: -> names:
+ # '0': negative -> - negative
+ # '1': positive -> - positive
+ #
+ if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
+ label_ids = sorted(feature["class_label"]["names"], key=int)
+ if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
+ raise ValueError(
+ f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
+ )
+ feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
+ return feature
+
+ def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
+ if isinstance(obj, dict):
+ if not obj:
+ return {}
+ _type = next(iter(obj))
+ if _type == "sequence":
+ _feature = unsimplify(obj).pop(_type)
+ return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
+ if _type == "list":
+ return [from_yaml_inner(unsimplify(obj)[_type])]
+ if _type == "struct":
+ return from_yaml_inner(obj["struct"])
+ elif _type == "dtype":
+ if isinstance(obj["dtype"], str):
+ # e.g. int32, float64, string, audio, image
+ try:
+ Value(obj["dtype"])
+ return {**obj, "_type": "Value"}
+ except ValueError:
+ # e.g. Audio, Image, ArrayXD
+ return {"_type": snakecase_to_camelcase(obj["dtype"])}
+ else:
+ return from_yaml_inner(obj["dtype"])
+ else:
+ return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
+ elif isinstance(obj, list):
+ names = [_feature.pop("name") for _feature in obj]
+ return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
+ else:
+ raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
+
+ return cls.from_dict(from_yaml_inner(yaml_data))
+
+ def encode_example(self, example):
+ """
+ Encode example into a format for Arrow.
+
+ Args:
+ example (`dict[str, Any]`):
+ Data in a Dataset row.
+
+ Returns:
+ `dict[str, Any]`
+ """
+ example = cast_to_python_objects(example)
+ return encode_nested_example(self, example)
+
+ def encode_column(self, column, column_name: str):
+ """
+ Encode column into a format for Arrow.
+
+ Args:
+ column (`list[Any]`):
+ Data in a Dataset column.
+ column_name (`str`):
+ Dataset column name.
+
+ Returns:
+ `list[Any]`
+ """
+ column = cast_to_python_objects(column)
+ return [encode_nested_example(self[column_name], obj, level=1) for obj in column]
+
+ def encode_batch(self, batch):
+ """
+ Encode batch into a format for Arrow.
+
+ Args:
+ batch (`dict[str, list[Any]]`):
+ Data in a Dataset batch.
+
+ Returns:
+ `dict[str, list[Any]]`
+ """
+ encoded_batch = {}
+ if set(batch) != set(self):
+ raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
+ for key, column in batch.items():
+ column = cast_to_python_objects(column)
+ encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column]
+ return encoded_batch
+
+ def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode example with custom feature decoding.
+
+ Args:
+ example (`dict[str, Any]`):
+ Dataset row data.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode audio or image files from private repositories on the Hub, you can pass
+ a dictionary `repo_id (str) -> token (bool or str)`.
+
+ Returns:
+ `dict[str, Any]`
+ """
+
+ return {
+ column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
+ if self._column_requires_decoding[column_name]
+ else value
+ for column_name, (feature, value) in zip_dict(
+ {key: value for key, value in self.items() if key in example}, example
+ )
+ }
+
+ def decode_column(self, column: list, column_name: str):
+ """Decode column with custom feature decoding.
+
+ Args:
+ column (`list[Any]`):
+ Dataset column data.
+ column_name (`str`):
+ Dataset column name.
+
+ Returns:
+ `list[Any]`
+ """
+ return (
+ [decode_nested_example(self[column_name], value) if value is not None else None for value in column]
+ if self._column_requires_decoding[column_name]
+ else column
+ )
+
+ def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
+ """Decode batch with custom feature decoding.
+
+ Args:
+ batch (`dict[str, list[Any]]`):
+ Dataset batch data.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode audio or image files from private repositories on the Hub, you can pass
+ a dictionary `repo_id (str) -> token (bool or str)`.
+
+ Returns:
+ `dict[str, list[Any]]`
+ """
+ decoded_batch = {}
+ for column_name, column in batch.items():
+ decoded_batch[column_name] = (
+ [
+ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
+ if value is not None
+ else None
+ for value in column
+ ]
+ if self._column_requires_decoding[column_name]
+ else column
+ )
+ return decoded_batch
+
+ def copy(self) -> "Features":
+ """
+ Make a deep copy of [`Features`].
+
+ Returns:
+ [`Features`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> copy_of_features = ds.features.copy()
+ >>> copy_of_features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ return copy.deepcopy(self)
+
+ def reorder_fields_as(self, other: "Features") -> "Features":
+ """
+ Reorder Features fields to match the field order of other [`Features`].
+
+ The order of the fields is important since it matters for the underlying arrow data.
+ Re-ordering the fields allows to make the underlying arrow data type match.
+
+ Args:
+ other ([`Features`]):
+ The other [`Features`] to align with.
+
+ Returns:
+ [`Features`]
+
+ Example::
+
+ >>> from datasets import Features, Sequence, Value
+ >>> # let's say we have two features with a different order of nested fields (for a and b for example)
+ >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
+ >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
+ >>> assert f1.type != f2.type
+ >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the field order match
+ >>> f1.reorder_fields_as(f2)
+ {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
+ >>> assert f1.reorder_fields_as(f2).type == f2.type
+ """
+
+ def recursive_reorder(source, target, stack=""):
+ stack_position = " at " + stack[1:] if stack else ""
+ if isinstance(target, Sequence):
+ target = target.feature
+ if isinstance(target, dict):
+ target = {k: [v] for k, v in target.items()}
+ else:
+ target = [target]
+ if isinstance(source, Sequence):
+ source, id_, length = source.feature, source.id, source.length
+ if isinstance(source, dict):
+ source = {k: [v] for k, v in source.items()}
+ reordered = recursive_reorder(source, target, stack)
+ return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
+ else:
+ source = [source]
+ reordered = recursive_reorder(source, target, stack)
+ return Sequence(reordered[0], id=id_, length=length)
+ elif isinstance(source, dict):
+ if not isinstance(target, dict):
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
+ if sorted(source) != sorted(target):
+ message = (
+ f"Keys mismatch: between {source} (source) and {target} (target).\n"
+ f"{source.keys()-target.keys()} are missing from target "
+ f"and {target.keys()-source.keys()} are missing from source" + stack_position
+ )
+ raise ValueError(message)
+ return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
+ elif isinstance(source, list):
+ if not isinstance(target, list):
+ raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
+ if len(source) != len(target):
+ raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
+ return [recursive_reorder(source[i], target[i], stack + ".") for i in range(len(target))]
+ else:
+ return source
+
+ return Features(recursive_reorder(self, other))
+
+ def flatten(self, max_depth=16) -> "Features":
+ """Flatten the features. Every dictionary column is removed and is replaced by
+ all the subfields it contains. The new fields are named by concatenating the
+ name of the original column and the subfield name like this: `<original>.<subfield>`.
+
+ If a column contains nested dictionaries, then all the lower-level subfields names are
+ also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.
+
+ Returns:
+ [`Features`]:
+ The flattened features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features.flatten()
+ {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
+ 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ ```
+ """
+ for depth in range(1, max_depth):
+ no_change = True
+ flattened = self.copy()
+ for column_name, subfeature in self.items():
+ if isinstance(subfeature, dict):
+ no_change = False
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
+ del flattened[column_name]
+ elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
+ no_change = False
+ flattened.update(
+ {
+ f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
+ for k, v in subfeature.feature.items()
+ }
+ )
+ del flattened[column_name]
+ elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
+ no_change = False
+ flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
+ del flattened[column_name]
+ self = flattened
+ if no_change:
+ break
+ return self
+
+
+def _align_features(features_list: List[Features]) -> List[Features]:
+ """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
+ name2feature = {}
+ for features in features_list:
+ for k, v in features.items():
+ if k in name2feature and isinstance(v, dict):
+ # Recursively align features.
+ name2feature[k] = _align_features([name2feature[k], v])[0]
+ elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
+ name2feature[k] = v
+
+ return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list]
+
+
+def _check_if_features_can_be_aligned(features_list: List[Features]):
+ """Check if the dictionaries of features can be aligned.
+
+ Two dictionaries of features can be aligned if the keys they share have the same type, or if some of them are of type `Value("null")`.
+ """
+ name2feature = {}
+ for features in features_list:
+ for k, v in features.items():
+ if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
+ name2feature[k] = v
+
+ for features in features_list:
+ for k, v in features.items():
+ if isinstance(v, dict) and isinstance(name2feature[k], dict):
+ # Deep checks for structure.
+ _check_if_features_can_be_aligned([name2feature[k], v])
+ elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
+ raise ValueError(
+ f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
+ )
diff --git a/venv/lib/python3.10/site-packages/datasets/features/image.py b/venv/lib/python3.10/site-packages/datasets/features/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..c63d4d439641a41d592c38a79343195e2beb591e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/features/image.py
@@ -0,0 +1,383 @@
+import os
+import sys
+import warnings
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..table import array_cast
+from ..utils.file_utils import is_local_path, xopen
+from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ import PIL.Image
+
+ from .features import FeatureType
+
+
+_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
+_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
+# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
+_VALID_IMAGE_ARRAY_DTPYES = [
+ np.dtype("|b1"),
+ np.dtype("|u1"),
+ np.dtype("u2"),
+ np.dtype("i2"),
+ np.dtype("u4"),
+ np.dtype("i4"),
+ np.dtype("f4"),
+ np.dtype("f8"),
+]
+
+
+@dataclass
+class Image:
+ """Image [`Feature`] to read image data from an image file.
+
+ Input: The Image feature accepts as input:
+ - A `str`: Absolute path to the image file (i.e. random access is allowed).
+ - A `dict` with the keys:
+
+ - `path`: String with the relative path of the image file inside the archive.
+ - `bytes`: Bytes of the image file.
+
+ This is useful for archived files with sequential access.
+
+ - An `np.ndarray`: NumPy array representing an image.
+ - A `PIL.Image.Image`: PIL image object.
+
+ Args:
+ mode (`str`, *optional*):
+ The mode to convert the image to. If `None`, the native mode of the image is used.
+ decode (`bool`, defaults to `True`):
+ Whether to decode the image data. If `False`,
+ returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
+
+ Examples:
+
+ ```py
+ >>> from datasets import load_dataset, Image
+ >>> ds = load_dataset("beans", split="train")
+ >>> ds.features["image"]
+ Image(decode=True, id=None)
+ >>> ds[0]["image"]
+ <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x...>
+ >>> ds = ds.cast_column('image', Image(decode=False))
+ >>> ds[0]["image"]
+ {'bytes': None,
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
+ ```
+ """
+
+ mode: Optional[str] = None
+ decode: bool = True
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "PIL.Image.Image"
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+ _type: str = field(default="Image", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
+ """Encode example into a format for Arrow.
+
+ Args:
+ value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
+ Data passed as input to Image feature.
+
+ Returns:
+ `dict` with "path" and "bytes" fields
+ """
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ if isinstance(value, list):
+ value = np.array(value)
+
+ if isinstance(value, str):
+ return {"path": value, "bytes": None}
+ elif isinstance(value, bytes):
+ return {"path": None, "bytes": value}
+ elif isinstance(value, np.ndarray):
+ # convert the image array to PNG/TIFF bytes
+ return encode_np_array(value)
+ elif isinstance(value, PIL.Image.Image):
+ # convert the PIL image to bytes (default format is PNG/TIFF)
+ return encode_pil_image(value)
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
+ # we set "bytes": None to not duplicate the data if they're already available locally
+ return {"bytes": None, "path": value.get("path")}
+ elif value.get("bytes") is not None or value.get("path") is not None:
+ # store the image bytes, and path is used to infer the image format using the file extension
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
+ else:
+ raise ValueError(
+ f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+ )
+
+ def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
+ """Decode example image file into image data.
+
+ Args:
+ value (`str` or `dict`):
+ A string with the absolute image file path, a dictionary with
+ keys:
+
+ - `path`: String with absolute or relative image file path.
+ - `bytes`: The bytes of the image file.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode
+ image files from private repositories on the Hub, you can pass
+ a dictionary repo_id (`str`) -> token (`bool` or `str`).
+
+ Returns:
+ `PIL.Image.Image`
+ """
+ if not self.decode:
+ raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
+
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ import PIL.ImageOps
+ else:
+ raise ImportError("To support decoding images, please install 'Pillow'.")
+
+ if token_per_repo_id is None:
+ token_per_repo_id = {}
+
+ path, bytes_ = value["path"], value["bytes"]
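+ # Prefer already-available bytes; otherwise read the file, either from the local
+ # filesystem or from the Hub (streaming), using the per-repo token when one is provided.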
+ if bytes_ is None:
+ if path is None:
+ raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
+ else:
+ if is_local_path(path):
+ image = PIL.Image.open(path)
+ else:
+ source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL
+ if source_url.startswith(config.HF_ENDPOINT)
+ else config.HUB_DATASETS_HFFS_URL
+ )
+ try:
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
+ token = token_per_repo_id.get(repo_id)
+ except ValueError:
+ token = None
+ download_config = DownloadConfig(token=token)
+ with xopen(path, "rb", download_config=download_config) as f:
+ bytes_ = BytesIO(f.read())
+ image = PIL.Image.open(bytes_)
+ else:
+ image = PIL.Image.open(BytesIO(bytes_))
+ image.load() # to avoid "Too many open files" errors
+ if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
+ image = PIL.ImageOps.exif_transpose(image)
+ if self.mode and self.mode != image.mode:
+ image = image.convert(self.mode)
+ return image
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
+ from .features import Value
+
+ return (
+ self
+ if self.decode
+ else {
+ "bytes": Value("binary"),
+ "path": Value("string"),
+ }
+ )
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
+ """Cast an Arrow array to the Image arrow storage type.
+ The Arrow types that can be converted to the Image pyarrow storage type are:
+
+ - `pa.string()` - it must contain the "path" data
+ - `pa.binary()` - it must contain the image bytes
+ - `pa.struct({"bytes": pa.binary()})`
+ - `pa.struct({"path": pa.string()})`
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+ - `pa.list(*)` - it must contain the image array data
+
+ Args:
+ storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.StructArray`: Array in the Image arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+ if pa.types.is_string(storage.type):
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_binary(storage.type):
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_struct(storage.type):
+ if storage.type.get_field_index("bytes") >= 0:
+ bytes_array = storage.field("bytes")
+ else:
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ if storage.type.get_field_index("path") >= 0:
+ path_array = storage.field("path")
+ else:
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_list(storage.type):
+ bytes_array = pa.array(
+ [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
+ type=pa.binary(),
+ )
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays(
+ [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
+ )
+ return array_cast(storage, self.pa_type)
+
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+ """Embed image files into the Arrow array.
+
+ Args:
+ storage (`pa.StructArray`):
+ PyArrow array to embed.
+
+ Returns:
+ `pa.StructArray`: Array in the Image arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+
+ @no_op_if_value_is_null
+ def path_to_bytes(path):
+ with xopen(path, "rb") as f:
+ bytes_ = f.read()
+ return bytes_
+
+ bytes_array = pa.array(
+ [
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+ for x in storage.to_pylist()
+ ],
+ type=pa.binary(),
+ )
+ path_array = pa.array(
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+ type=pa.string(),
+ )
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+ return array_cast(storage, self.pa_type)
+
+
+def list_image_compression_formats() -> List[str]:
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ global _IMAGE_COMPRESSION_FORMATS
+ if _IMAGE_COMPRESSION_FORMATS is None:
+ PIL.Image.init()
+ _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
+ return _IMAGE_COMPRESSION_FORMATS
+
+
+def image_to_bytes(image: "PIL.Image.Image") -> bytes:
+ """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
+ buffer = BytesIO()
+ if image.format in list_image_compression_formats():
+ format = image.format
+ else:
+ format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
+ image.save(buffer, format=format)
+ return buffer.getvalue()
+
+
+def encode_pil_image(image: "PIL.Image.Image") -> dict:
+ if hasattr(image, "filename") and image.filename != "":
+ return {"path": image.filename, "bytes": None}
+ else:
+ return {"path": None, "bytes": image_to_bytes(image)}
+
+
+def encode_np_array(array: np.ndarray) -> dict:
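+ """Encode a NumPy array into ``{"path": None, "bytes": <image bytes>}``, downcasting to a Pillow-compatible dtype when needed.
+
+ Example (an illustrative sketch):
+
+ ```py
+ >>> import numpy as np
+ >>> encoded = encode_np_array(np.zeros((2, 2, 3), dtype=np.uint16))  # warns that multi-channel data is downcast to uint8
+ >>> encoded["path"] is None and isinstance(encoded["bytes"], bytes)
+ True
+ ```
+ """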
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ dtype = array.dtype
+ dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
+ dtype_kind = dtype.kind
+ dtype_itemsize = dtype.itemsize
+
+ dest_dtype = None
+
+ # Multi-channel array case (only np.dtype("|u1") is allowed)
+ if array.shape[2:]:
+ if dtype_kind not in ["u", "i"]:
+ raise TypeError(
+ f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
+ )
+ dest_dtype = np.dtype("|u1")
+ if dtype != dest_dtype:
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
+ # Exact match
+ elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
+ dest_dtype = dtype
+ else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
+ while dtype_itemsize >= 1:
+ dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
+ if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
+ dest_dtype = np.dtype(dtype_str)
+ warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
+ break
+ else:
+ dtype_itemsize //= 2
+ if dest_dtype is None:
+ raise TypeError(
+ f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
+ )
+
+ image = PIL.Image.fromarray(array.astype(dest_dtype))
+ return {"path": None, "bytes": image_to_bytes(image)}
+
+
+def objects_to_list_of_image_dicts(
+ objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
+) -> List[dict]:
+ """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
+ if config.PIL_AVAILABLE:
+ import PIL.Image
+ else:
+ raise ImportError("To support encoding images, please install 'Pillow'.")
+
+ if objs:
+ _, obj = first_non_null_value(objs)
+ if isinstance(obj, str):
+ return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
+ if isinstance(obj, np.ndarray):
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
+ return [obj_to_image_dict_func(obj) for obj in objs]
+ elif isinstance(obj, PIL.Image.Image):
+ obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
+ return [obj_to_image_dict_func(obj) for obj in objs]
+ else:
+ return objs
+ else:
+ return objs
diff --git a/venv/lib/python3.10/site-packages/datasets/features/translation.py b/venv/lib/python3.10/site-packages/datasets/features/translation.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d3eb1af4bbb15397afe4f1e0a5afd54060fcda3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/features/translation.py
@@ -0,0 +1,129 @@
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
+
+import pyarrow as pa
+
+
+if TYPE_CHECKING:
+ from .features import FeatureType
+
+
+@dataclass
+class Translation:
+ """`FeatureConnector` for translations with fixed languages per example.
+ Here for compatibility with tfds.
+
+ Args:
+ languages (`dict`):
+ A dictionary for each example mapping string language codes to string translations.
+
+ Example:
+
+ ```python
+ >>> # At construction time:
+ >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
+ >>> # During data generation:
+ >>> yield {
+ ... 'en': 'the cat',
+ ... 'fr': 'le chat',
+ ... 'de': 'die katze'
+ ... }
+ ```
+ """
+
+ languages: List[str]
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="Translation", init=False, repr=False)
+
+ def __call__(self):
+ return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """Flatten the Translation feature into a dictionary."""
+ from .features import Value
+
+ return {k: Value("string") for k in sorted(self.languages)}
+
+
+@dataclass
+class TranslationVariableLanguages:
+ """`FeatureConnector` for translations with variable languages per example.
+ Here for compatibility with tfds.
+
+ Args:
+ languages (`dict`):
+ A dictionary for each example mapping string language codes to one or more string translations.
+ The languages present may vary from example to example.
+
+ Returns:
+ - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
+ Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
+
+ Example:
+
+ ```python
+ >>> # At construction time:
+ >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
+ >>> # During data generation:
+ >>> yield {
+ ... 'en': 'the cat',
+ ... 'fr': ['le chat', 'la chatte'],
+ ... 'de': 'die katze'
+ ... }
+ >>> # Tensor returned :
+ >>> {
+ ... 'language': ['en', 'de', 'fr', 'fr'],
+ ... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
+ ... }
+ ```
+ """
+
+ languages: Optional[List] = None
+ num_languages: Optional[int] = None
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = None
+ _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
+
+ def __post_init__(self):
+ self.languages = sorted(set(self.languages)) if self.languages else None
+ self.num_languages = len(self.languages) if self.languages else None
+
+ def __call__(self):
+ return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
+
+ def encode_example(self, translation_dict):
+ lang_set = set(self.languages) if self.languages else set()
+ if set(translation_dict) == {"language", "translation"}:
+ return translation_dict
+ elif self.languages and set(translation_dict) - lang_set:
+ raise ValueError(
+ f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
+ )
+
+ # Convert dictionary into tuples, splitting out cases where there are
+ # multiple translations for a single language.
+ translation_tuples = []
+ for lang, text in translation_dict.items():
+ if isinstance(text, str):
+ translation_tuples.append((lang, text))
+ else:
+ translation_tuples.extend([(lang, el) for el in text])
+
+ # Ensure translations are in ascending order by language code.
+ languages, translations = zip(*sorted(translation_tuples))
+
+ return {"language": languages, "translation": translations}
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """Flatten the TranslationVariableLanguages feature into a dictionary."""
+ from .features import Sequence, Value
+
+ return {
+ "language": Sequence(Value("string")),
+ "translation": Sequence(Value("string")),
+ }
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/__init__.py b/venv/lib/python3.10/site-packages/datasets/filesystems/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2753e3d380f8b5212eb3434315f51f3afc85d05
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/filesystems/__init__.py
@@ -0,0 +1,69 @@
+import importlib
+import shutil
+import warnings
+from typing import List
+
+import fsspec
+import fsspec.asyn
+from fsspec.implementations.local import LocalFileSystem
+
+from ..utils.deprecation_utils import deprecated
+from . import compression
+
+
+_has_s3fs = importlib.util.find_spec("s3fs") is not None
+
+if _has_s3fs:
+ from .s3filesystem import S3FileSystem # noqa: F401
+
+COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
+ compression.Bz2FileSystem,
+ compression.GzipFileSystem,
+ compression.Lz4FileSystem,
+ compression.XzFileSystem,
+ compression.ZstdFileSystem,
+]
+
+# Register custom filesystems
+for fs_class in COMPRESSION_FILESYSTEMS:
+ if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
+ warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
+ fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
+
+
+@deprecated(
+ "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
+)
+def extract_path_from_uri(dataset_path: str) -> str:
+ """
+ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
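+
+ Example (illustrative):
+
+ ```py
+ >>> extract_path_from_uri("s3://my-bucket/dataset/train")
+ 'my-bucket/dataset/train'
+ >>> extract_path_from_uri("dataset/train")
+ 'dataset/train'
+ ```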
+ """
+ if "://" in dataset_path:
+ dataset_path = dataset_path.split("://")[1]
+ return dataset_path
+
+
+def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
+ """
+ Checks if `fs` is a remote filesystem.
+
+ Args:
+ fs (`fsspec.spec.AbstractFileSystem`):
+ An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
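+
+ Example (illustrative):
+
+ ```py
+ >>> import fsspec
+ >>> is_remote_filesystem(fsspec.filesystem("file"))
+ False
+ >>> is_remote_filesystem(fsspec.filesystem("memory"))
+ True
+ ```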
+ """
+ return not isinstance(fs, LocalFileSystem)
+
+
+def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
+ """
+ Renames the file `src` in `fs` to `dst`.
+ """
+ if not is_remote_filesystem(fs):
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
+ shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
+ else:
+ fs.mv(src, dst, recursive=True)
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a578eb31ce8272fc8c6d572e762fa57dc1d01c95
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aaef71248c39780414c3e7f1d43bc42d852ba746
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b9138d575165137735627a181ec379946412b0d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/compression.py b/venv/lib/python3.10/site-packages/datasets/filesystems/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3721dad5f505d05838e11744f5795fbb3cecf09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/filesystems/compression.py
@@ -0,0 +1,123 @@
+import os
+from typing import Optional
+
+import fsspec
+from fsspec.archive import AbstractArchiveFileSystem
+
+
+class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
+ """Read contents of compressed file as a filesystem with one file inside."""
+
+ root_marker = ""
+ protocol: str = (
+ None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
+ )
+ compression: str = None # compression type in fsspec. ex: "gzip"
+ extension: str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
+
+ def __init__(
+ self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
+ ):
+ """
+ The compressed file system can be instantiated from any compressed file.
+ It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
+
+ The single file inside the filesystem is named after the compressed file,
+ without the compression extension at the end of the filename.
+
+ Args:
+ fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
+ mode (:obj:``str``): Currently, only 'rb' accepted
+ target_protocol (:obj:``str``, optional): To override the FS protocol inferred from a URL.
+ target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
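+
+ Example (an illustrative sketch; the archive path is hypothetical):
+
+ ```py
+ >>> fs = GzipFileSystem(fo="path/to/file.txt.gz")  # doctest: +SKIP
+ >>> fs.cat("file.txt")  # doctest: +SKIP
+ b'...'
+ ```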
+ """
+ super().__init__(self, **kwargs)
+ # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
+ self.file = fsspec.open(
+ fo,
+ mode="rb",
+ protocol=target_protocol,
+ compression=self.compression,
+ client_kwargs={
+ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
+ "trust_env": True, # Enable reading proxy env variables.
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
+ },
+ **(target_options or {}),
+ )
+ self.compressed_name = os.path.basename(self.file.path.split("::")[0])
+ self.uncompressed_name = (
+ self.compressed_name[: self.compressed_name.rindex(".")]
+ if "." in self.compressed_name
+ else self.compressed_name
+ )
+ self.dir_cache = None
+
+ @classmethod
+ def _strip_protocol(cls, path):
+ # compressed file paths are always relative to the archive root
+ return super()._strip_protocol(path).lstrip("/")
+
+ def _get_dirs(self):
+ if self.dir_cache is None:
+ f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
+ self.dir_cache = {f["name"]: f}
+
+ def cat(self, path: str):
+ return self.file.open().read()
+
+ def _open(
+ self,
+ path: str,
+ mode: str = "rb",
+ block_size=None,
+ autocommit=True,
+ cache_options=None,
+ **kwargs,
+ ):
+ path = self._strip_protocol(path)
+ if mode != "rb":
+ raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
+ return self.file.open()
+
+
+class Bz2FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of BZ2 file as a filesystem with one file inside."""
+
+ protocol = "bz2"
+ compression = "bz2"
+ extension = ".bz2"
+
+
+class GzipFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of GZIP file as a filesystem with one file inside."""
+
+ protocol = "gzip"
+ compression = "gzip"
+ extension = ".gz"
+
+
+class Lz4FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of LZ4 file as a filesystem with one file inside."""
+
+ protocol = "lz4"
+ compression = "lz4"
+ extension = ".lz4"
+
+
+class XzFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of .xz (LZMA) file as a filesystem with one file inside."""
+
+ protocol = "xz"
+ compression = "xz"
+ extension = ".xz"
+
+
+class ZstdFileSystem(BaseCompressedFileFileSystem):
+ """
+ Read contents of .zstd file as a filesystem with one file inside.
+ """
+
+ protocol = "zstd"
+ compression = "zstd"
+ extension = ".zst"
diff --git a/venv/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py b/venv/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d204f1f8738e51411cacac0201fd67e5c185422
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
@@ -0,0 +1,116 @@
+import s3fs
+
+from ..utils.deprecation_utils import deprecated
+
+
+@deprecated("Use s3fs.S3FileSystem instead.")
+class S3FileSystem(s3fs.S3FileSystem):
+ """
+ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
+
+ Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
+
+ Args:
+ anon (`bool`, default to `False`):
+ Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
+ or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
+ key (`str`):
+ If not anonymous, use this access key ID, if specified.
+ secret (`str`):
+ If not anonymous, use this secret access key, if specified.
+ token (`str`):
+ If not anonymous, use this security token, if specified.
+ use_ssl (`bool`, defaults to `True`):
+ Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
+ also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
+ s3_additional_kwargs (`dict`):
+ Parameters that are used when calling S3 API methods. Typically used for things
+ like ServerSideEncryption.
+ client_kwargs (`dict`):
+ Parameters for the botocore client.
+ requester_pays (`bool`, defaults to `False`):
+ Whether `RequesterPays` buckets are supported.
+ default_block_size (`int`):
+ If given, the default block size value used for `open()`, if no specific value is given at call time.
+ The built-in default is 5MB.
+ default_fill_cache (`bool`, defaults to `True`):
+ Whether to use cache filling with open by default. Refer to `S3File.open`.
+ default_cache_type (`str`, defaults to `bytes`):
+ If given, the default `cache_type` value used for `open()`. Set to `none` if no
+ caching is desired. See fsspec's documentation for other available `cache_type` values.
+ version_aware (`bool`, defaults to `False`):
+ Whether to support bucket versioning. If enabled, this will require the user to have
+ the necessary IAM permissions for dealing with versioned objects.
+ cache_regions (`bool`, defaults to `False`):
+ Whether to cache bucket regions. Whenever a new bucket is used, it will
+ first find out which region it belongs to and then use the client for that region.
+ asynchronous (`bool`, defaults to `False`):
+ Whether this instance is to be used from inside coroutines.
+ config_kwargs (`dict`):
+ Parameters passed to `botocore.client.Config`.
+ **kwargs:
+ Other parameters for core session.
+ session (`aiobotocore.session.AioSession`):
+ Session to be used for all connections. This session will be used in place of creating
+ a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
+ skip_instance_cache (`bool`):
+ Control reuse of instances. Passed on to `fsspec`.
+ use_listings_cache (`bool`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ listings_expiry_time (`int` or `float`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
+
+ Examples:
+
+ Listing files from public S3 bucket.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
+ >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
+ ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+ Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
+ ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+ Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
+
+ ```py
+ >>> import botocore
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
+ >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
+ ```
+
+ Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ >>> print(len(dataset))
+ 25000
+ ```
+
+ Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> dataset = load_dataset("imdb")
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ ```
+ """
diff --git a/venv/lib/python3.10/site-packages/datasets/fingerprint.py b/venv/lib/python3.10/site-packages/datasets/fingerprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..b26caff328bd799c508641fd7289c8c01a28d5f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/fingerprint.py
@@ -0,0 +1,494 @@
+import inspect
+import os
+import random
+import shutil
+import tempfile
+import weakref
+from functools import wraps
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import xxhash
+
+from . import config
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
+from .utils._dill import dumps
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+ from .arrow_dataset import Dataset
+
+
+logger = get_logger(__name__)
+
+
+# Fingerprinting makes it possible to have one deterministic fingerprint per dataset state.
+# A dataset fingerprint is updated after each transform.
+# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
+# This is possible thanks to a custom hashing function that works with most python objects.
+
+# Fingerprinting is the main mechanism that enables caching.
+# The caching mechanism makes it possible to reload an existing cache file if it has already been computed.
+
+
+#################
+# Caching
+#################
+
+_CACHING_ENABLED = True
+_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None
+_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
+
+
+class _TempCacheDir:
+ """
+ A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
+ before deleting the directory itself to avoid permission errors on Windows.
+ """
+
+ def __init__(self):
+ self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
+ self._finalizer = weakref.finalize(self, self._cleanup)
+
+ def _cleanup(self):
+ for dset in get_datasets_with_cache_file_in_temp_dir():
+ dset.__del__()
+ if os.path.exists(self.name):
+ try:
+ shutil.rmtree(self.name)
+ except Exception as e:
+ raise OSError(
+ f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually."
+ ) from e
+
+ def cleanup(self):
+ if self._finalizer.detach():
+ self._cleanup()
+
+
+def maybe_register_dataset_for_temp_dir_deletion(dataset):
+ """
+ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
+ to properly delete them before deleting the temporary directory.
+ The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
+ """
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
+ return
+
+ global _DATASETS_WITH_TABLE_IN_TEMP_DIR
+ if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
+ if any(
+ Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
+ for cache_file in dataset.cache_files
+ ):
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
+
+
+def get_datasets_with_cache_file_in_temp_dir():
+ return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
+
+
+def enable_caching():
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = True
+
+
+def disable_caching():
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
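+
+ Example (illustrative):
+
+ ```py
+ >>> from datasets.fingerprint import disable_caching, is_caching_enabled
+ >>> disable_caching()
+ >>> is_caching_enabled()
+ False
+ ```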
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = False
+
+
+@deprecated(
+ "Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets."
+)
+def set_caching_enabled(boolean: bool):
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
+ the ``download_mode`` parameter in :func:`datasets.load_dataset`.
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = bool(boolean)
+
+
+def is_caching_enabled() -> bool:
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows to reload an existing cache file if it's already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
+ """
+ global _CACHING_ENABLED
+ return bool(_CACHING_ENABLED)
+
+
+def get_temporary_cache_files_directory() -> str:
+ """Return a directory that is deleted when session closes."""
+ global _TEMP_DIR_FOR_TEMP_CACHE_FILES
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
+ _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir()
+ return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
+
+
+#################
+# Hashing
+#################
+
+
+@deprecated("Use `copyreg.pickle` to register a custom reducer.")
+def hashregister(*types):
+ def proxy(func):
+ for t in types:
+ Hasher.dispatch[t] = func
+ return func
+
+ return proxy
+
+
+class Hasher:
+ """Hasher that accepts python objects as inputs."""
+
+ dispatch: Dict = {}
+
+ def __init__(self):
+ self.m = xxhash.xxh64()
+
+ @classmethod
+ def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
+ value = [value] if isinstance(value, bytes) else value
+ m = xxhash.xxh64()
+ for x in value:
+ m.update(x)
+ return m.hexdigest()
+
+ @classmethod
+ @deprecated("Use `Hasher.hash` instead.")
+ def hash_default(cls, value: Any) -> str:
+ return cls.hash(value)
+
+ @classmethod
+ def hash(cls, value: Any) -> str:
+ return cls.hash_bytes(dumps(value))
+
+ def update(self, value: Any) -> None:
+ header_for_update = f"=={type(value)}=="
+ value_for_update = self.hash(value)
+ self.m.update(header_for_update.encode("utf8"))
+ self.m.update(value_for_update.encode("utf-8"))
+
+ def hexdigest(self) -> str:
+ return self.m.hexdigest()
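+
+# Illustrative usage (a minimal sketch): the hash only depends on the serialized value,
+# so equal objects produce equal hashes, which is what makes fingerprint-based caching reproducible:
+#
+#     >>> Hasher.hash({"remove_columns": ["idx"]}) == Hasher.hash({"remove_columns": ["idx"]})
+#     True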
+
+
+#################
+# Fingerprinting
+#################
+
+fingerprint_rng = random.Random()
+# we show a warning only once when fingerprinting fails to avoid spam
+fingerprint_warnings: Dict[str, bool] = {}
+
+
+def generate_fingerprint(dataset: "Dataset") -> str:
+ state = dataset.__dict__
+ hasher = Hasher()
+ for key in sorted(state):
+ if key == "_fingerprint":
+ continue
+ hasher.update(key)
+ hasher.update(state[key])
+ # hash data files last modification timestamps as well
+ for cache_file in dataset.cache_files:
+ hasher.update(os.path.getmtime(cache_file["filename"]))
+ return hasher.hexdigest()
+
+
+def generate_random_fingerprint(nbits: int = 64) -> str:
+ return f"{fingerprint_rng.getrandbits(nbits):0{nbits//4}x}"
+
+
+def update_fingerprint(fingerprint, transform, transform_args):
+ global fingerprint_warnings
+ hasher = Hasher()
+ hasher.update(fingerprint)
+ try:
+ hasher.update(transform)
+ except: # noqa various errors might raise here from pickle or dill
+ if _CACHING_ENABLED:
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+ logger.warning(
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
+ )
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+ else:
+ logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
+ else:
+ logger.info(
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+ )
+
+ return generate_random_fingerprint()
+ for key in sorted(transform_args):
+ hasher.update(key)
+ try:
+ hasher.update(transform_args[key])
+ except: # noqa various errors might raise here from pickle or dill
+ if _CACHING_ENABLED:
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+ logger.warning(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
+ )
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+ else:
+ logger.info(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
+ )
+ else:
+ logger.info(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+ )
+ return generate_random_fingerprint()
+ return hasher.hexdigest()
+
+
+def validate_fingerprint(fingerprint: str, max_length=64):
+ """
+ Make sure the fingerprint is a non-empty string that is no longer than max_length (64 by default),
+ so that the fingerprint can be used to name cache files without issues.
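+
+ Example (illustrative):
+
+ ```py
+ >>> validate_fingerprint("a1b2c3d4")  # a valid fingerprint passes silently
+ >>> validate_fingerprint("x" * 100)  # doctest: +SKIP
+ Traceback (most recent call last):
+ ValueError: ...
+ ```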
+ """
+ if not isinstance(fingerprint, str) or not fingerprint:
+ raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in fingerprint:
+ raise ValueError(
+ f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
+ f"They could create issues when creating cache files."
+ )
+ if len(fingerprint) > max_length:
+ raise ValueError(
+ f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}."
+ "It could create issues when creating cache files."
+ )
+
+
+def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str:
+ """
+ Format a transform to the format that will be used to update the fingerprint.
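+
+ Example (illustrative):
+
+ ```py
+ >>> format_transform_for_fingerprint(len, version="1.0.0")
+ 'builtins.len@1.0.0'
+ ```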
+ """
+ transform = f"{func.__module__}.{func.__qualname__}"
+ if version is not None:
+ transform += f"@{version}"
+ return transform
+
+
+def format_kwargs_for_fingerprint(
+ func: Callable,
+ args: Tuple,
+ kwargs: Dict[str, Any],
+ use_kwargs: Optional[List[str]] = None,
+ ignore_kwargs: Optional[List[str]] = None,
+ randomized_function: bool = False,
+) -> Dict[str, Any]:
+ """
+ Format the kwargs of a transform to the format that will be used to update the fingerprint.
+ """
+ kwargs_for_fingerprint = kwargs.copy()
+ if args:
+ params = [p.name for p in inspect.signature(func).parameters.values() if p.kind != p.VAR_KEYWORD]
+ args = args[1:] # assume the first argument is the dataset
+ params = params[1:]
+ kwargs_for_fingerprint.update(zip(params, args))
+ else:
+ del kwargs_for_fingerprint[
+ next(iter(inspect.signature(func).parameters))
+ ] # assume the first key is the dataset
+
+ # keep the right kwargs to be hashed to generate the fingerprint
+
+ if use_kwargs:
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
+ if ignore_kwargs:
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
+ if randomized_function: # randomized functions have `seed` and `generator` parameters
+ if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ kwargs_for_fingerprint["generator"] = np.random.default_rng(seed)
+
+ # remove kwargs that are the default values
+
+ default_values = {
+ p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
+ }
+ for default_varname, default_value in default_values.items():
+ if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value:
+ kwargs_for_fingerprint.pop(default_varname)
+ return kwargs_for_fingerprint
+
+
+def fingerprint_transform(
+ inplace: bool,
+ use_kwargs: Optional[List[str]] = None,
+ ignore_kwargs: Optional[List[str]] = None,
+ fingerprint_names: Optional[List[str]] = None,
+ randomized_function: bool = False,
+ version: Optional[str] = None,
+):
+ """
+ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
+ Args:
+ inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace.
+ Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
+ setting the fingerprint of the returned Dataset.
+ use_kwargs (:obj:`List[str]`, optional): optional white list of argument names to take into account
+ to update the fingerprint. By default all the arguments are used.
+ ignore_kwargs (:obj:`List[str]`, optional): optional black list of argument names to take into account
+ to update the fingerprint. Note that ignore_kwargs takes precedence over use_kwargs.
+ fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]):
+ If the dataset transform is not inplace and returns a DatasetDict, then it can require
+ several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
+ one fingerprint named after each element of fingerprint_names is going to be passed.
+ randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has
+ optional parameters "seed" and "generator", then you can set randomized_function to True.
+ This way, even if users set "seed" and "generator" to None, then the fingerprint is
+ going to be randomly generated depending on numpy's current state. In this case, the
+ generator is set to np.random.default_rng(np.random.get_state()[1][0]).
+ version (:obj:`str`, optional): version of the transform. The version is taken into account when
+ computing the fingerprint. If a dataset transform changes (or at least if the output data
+ that are cached change), then one should increase the version. If the version stays the
+ same, then old cached data that are not compatible with the new transform could be reused.
+ It should be in the format "MAJOR.MINOR.PATCH".
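+
+ Example (a minimal sketch of how the decorator is typically applied; the transform below is hypothetical):
+
+ ```py
+ @fingerprint_transform(inplace=False)
+ def drop_first_row(self: "Dataset", new_fingerprint: Optional[str] = None) -> "Dataset":
+     # `new_fingerprint` is computed by the wrapper from the previous fingerprint,
+     # the transform name and its arguments, and must be set on the returned dataset
+     ...
+ ```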
+ """
+
+ if use_kwargs is not None and not isinstance(use_kwargs, list):
+ raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
+
+ if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
+ raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}")
+
+ if inplace and fingerprint_names:
+ raise ValueError("fingerprint_names are only used when inplace is False")
+
+ fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
+
+ def _fingerprint(func):
+ if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
+ raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
+
+ if randomized_function: # randomized functions have seed and generator parameters
+ if "seed" not in func.__code__.co_varnames:
+ raise ValueError(f"'seed' must be in {func}'s signature")
+ if "generator" not in func.__code__.co_varnames:
+ raise ValueError(f"'generator' must be in {func}'s signature")
+ # this call has to be outside the wrapper since __qualname__ changes in multiprocessing
+ transform = format_transform_for_fingerprint(func, version=version)
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(
+ func,
+ args,
+ kwargs,
+ use_kwargs=use_kwargs,
+ ignore_kwargs=ignore_kwargs,
+ randomized_function=randomized_function,
+ )
+
+ if args:
+ dataset: Dataset = args[0]
+ args = args[1:]
+ else:
+ dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
+
+ # compute new_fingerprint and add it to the args of not in-place transforms
+ if inplace:
+ new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
+ if kwargs.get(fingerprint_name) is None:
+ kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
+ kwargs[fingerprint_name] = update_fingerprint(
+ dataset._fingerprint, transform, kwargs_for_fingerprint
+ )
+ else:
+ validate_fingerprint(kwargs[fingerprint_name])
+
+ # Call actual function
+
+ out = func(dataset, *args, **kwargs)
+
+ # Update fingerprint of in-place transforms + update in-place history of transforms
+
+ if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
+ dataset._fingerprint = new_fingerprint
+
+ return out
+
+ wrapper._decorator_name_ = "fingerprint"
+ return wrapper
+
+ return _fingerprint
diff --git a/venv/lib/python3.10/site-packages/datasets/info.py b/venv/lib/python3.10/site-packages/datasets/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..557f5b77d3f7ff754e4a9482dada99842511a160
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/info.py
@@ -0,0 +1,593 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetInfo and MetricInfo record information we know about a dataset and a metric.
+
+This includes things that we know about the dataset statically, i.e.:
+ - description
+ - canonical location
+ - does it have validation and tests splits
+ - size
+ - etc.
+
+This also includes the things that can and should be computed once we've
+processed the dataset as well:
+ - number of examples (in each split)
+ - etc.
+"""
+
+import copy
+import dataclasses
+import json
+import os
+import posixpath
+import warnings
+from dataclasses import dataclass
+from pathlib import Path
+from typing import ClassVar, Dict, List, Optional, Union
+
+import fsspec
+from fsspec.core import url_to_fs
+from huggingface_hub import DatasetCard, DatasetCardData
+
+from . import config
+from .features import Features, Value
+from .splits import SplitDict
+from .tasks import TaskTemplate, task_template_from_dict
+from .utils import Version
+from .utils.logging import get_logger
+from .utils.py_utils import asdict, unique_values
+
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class SupervisedKeysData:
+ input: str = ""
+ output: str = ""
+
+
+@dataclass
+class DownloadChecksumsEntryData:
+ key: str = ""
+ value: str = ""
+
+
+class MissingCachedSizesConfigError(Exception):
+ """The expected cached sizes of the download file are missing."""
+
+
+class NonMatchingCachedSizesError(Exception):
+ """The prepared split doesn't have expected sizes."""
+
+
+@dataclass
+class PostProcessedInfo:
+ features: Optional[Features] = None
+ resources_checksums: Optional[dict] = None
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+
+ @classmethod
+ def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
+
+
+@dataclass
+class DatasetInfo:
+ """Information about a dataset.
+
+ `DatasetInfo` documents datasets, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Not all fields are known on construction and may be updated later.
+
+ Attributes:
+ description (`str`):
+ A description of the dataset.
+ citation (`str`):
+ A BibTeX citation of the dataset.
+ homepage (`str`):
+ A URL to the official homepage for the dataset.
+ license (`str`):
+ The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
+ features ([`Features`], *optional*):
+ The features used to specify the dataset's column types.
+ post_processed (`PostProcessedInfo`, *optional*):
+ Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
+ supervised_keys (`SupervisedKeysData`, *optional*):
+ Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
+ builder_name (`str`, *optional*):
+ The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
+ config_name (`str`, *optional*):
+ The name of the configuration derived from [`BuilderConfig`].
+ version (`str` or [`Version`], *optional*):
+ The version of the dataset.
+ splits (`dict`, *optional*):
+ The mapping between split name and metadata.
+ download_checksums (`dict`, *optional*):
+ The mapping between the URL to download the dataset's checksums and corresponding metadata.
+ download_size (`int`, *optional*):
+ The size of the files to download to generate the dataset, in bytes.
+ post_processing_size (`int`, *optional*):
+ Size of the dataset in bytes after post-processing, if any.
+ dataset_size (`int`, *optional*):
+ The combined size in bytes of the Arrow tables for all splits.
+ size_in_bytes (`int`, *optional*):
+ The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
+ task_templates (`List[TaskTemplate]`, *optional*):
+ The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
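+
+ Example (illustrative):
+
+ ```py
+ >>> from datasets import DatasetInfo, Features, Value
+ >>> info = DatasetInfo(
+ ...     description="An illustrative dataset.",
+ ...     features=Features({"text": Value("string"), "label": Value("int64")}),
+ ... )
+ >>> info.features  # doctest: +SKIP
+ {'text': Value(dtype='string', id=None), 'label': Value(dtype='int64', id=None)}
+ ```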
+ """
+
+ # Set in the dataset scripts
+ description: str = dataclasses.field(default_factory=str)
+ citation: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ features: Optional[Features] = None
+ post_processed: Optional[PostProcessedInfo] = None
+ supervised_keys: Optional[SupervisedKeysData] = None
+ task_templates: Optional[List[TaskTemplate]] = None
+
+ # Set later by the builder
+ builder_name: Optional[str] = None
+ dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
+ config_name: Optional[str] = None
+ version: Optional[Union[str, Version]] = None
+ # Set later by `download_and_prepare`
+ splits: Optional[dict] = None
+ download_checksums: Optional[dict] = None
+ download_size: Optional[int] = None
+ post_processing_size: Optional[int] = None
+ dataset_size: Optional[int] = None
+ size_in_bytes: Optional[int] = None
+
+ _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
+ "config_name",
+ "download_size",
+ "dataset_size",
+ "features",
+ "splits",
+ ]
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+ if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
+ self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
+ if self.version is not None and not isinstance(self.version, Version):
+ if isinstance(self.version, str):
+ self.version = Version(self.version)
+ else:
+ self.version = Version.from_dict(self.version)
+ if self.splits is not None and not isinstance(self.splits, SplitDict):
+ self.splits = SplitDict.from_split_dict(self.splits)
+ if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
+ if isinstance(self.supervised_keys, (tuple, list)):
+ self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
+ else:
+ self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
+
+ # Parse and make a list of templates
+ if self.task_templates is not None:
+ if isinstance(self.task_templates, (list, tuple)):
+ templates = [
+ template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
+ for template in self.task_templates
+ ]
+ self.task_templates = [template for template in templates if template is not None]
+ elif isinstance(self.task_templates, TaskTemplate):
+ self.task_templates = [self.task_templates]
+ else:
+ template = task_template_from_dict(self.task_templates)
+ self.task_templates = [template] if template is not None else []
+
+ # Align task templates with features
+ if self.task_templates is not None:
+ self.task_templates = list(self.task_templates)
+ if self.features is not None:
+ self.task_templates = [
+ template.align_with_features(self.features) for template in (self.task_templates)
+ ]
+
+ def write_to_directory(
+ self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
+ ):
+ """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
+
+ Args:
+ dataset_info_dir (`str`):
+ Destination directory.
+ pretty_print (`bool`, defaults to `False`):
+ If `True`, the JSON will be pretty-printed with the indent level of 4.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
+ self._dump_info(f, pretty_print=pretty_print)
+ if self.license:
+ with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
+ self._dump_license(f)
+
+ def _dump_info(self, file, pretty_print=False):
+ """Dump info in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
+
+ def _dump_license(self, file):
+ """Dump license in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(self.license.encode("utf-8"))
+
+ @classmethod
+ def from_merge(cls, dataset_infos: List["DatasetInfo"]):
+ dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
+
+ if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
+ # if all dataset_infos are equal we don't need to merge. Just return the first.
+ return dataset_infos[0]
+
+ description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
+ citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
+ homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
+ license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
+ features = None
+ supervised_keys = None
+ task_templates = None
+
+ # Find common task templates across all dataset infos
+ all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
+ if len(all_task_templates) > 1:
+ task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
+ elif len(all_task_templates):
+ task_templates = list(set(all_task_templates[0]))
+ # If no common task templates found, replace empty list with None
+ task_templates = task_templates if task_templates else None
+
+ return cls(
+ description=description,
+ citation=citation,
+ homepage=homepage,
+ license=license,
+ features=features,
+ supervised_keys=supervised_keys,
+ task_templates=task_templates,
+ )
+
+ @classmethod
+ def from_directory(
+ cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
+ ) -> "DatasetInfo":
+ """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
+
+ This function updates all the dynamically generated fields (num_examples,
+ hash, time of creation,...) of the [`DatasetInfo`].
+
+ This will overwrite all previous metadata.
+
+ Args:
+ dataset_info_dir (`str`):
+ The directory containing the metadata file. This
+ should be the root directory of a specific dataset version.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ <Deprecated version="2.9.0">
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ </Deprecated>
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ <Added version="2.9.0"/>
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetInfo
+ >>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
+ logger.info(f"Loading Dataset info from {dataset_info_dir}")
+ if not dataset_info_dir:
+ raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
+ dataset_info_dict = json.load(f)
+ return cls.from_dict(dataset_info_dict)
+
+ @classmethod
+ def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
+
+ def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
+ self_dict = self.__dict__
+ self_dict.update(
+ **{
+ k: copy.deepcopy(v)
+ for k, v in other_dataset_info.__dict__.items()
+ if (v is not None or not ignore_none)
+ }
+ )
+
+ def copy(self) -> "DatasetInfo":
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+ def _to_yaml_dict(self) -> dict:
+ yaml_dict = {}
+ dataset_info_dict = asdict(self)
+ for key in dataset_info_dict:
+ if key in self._INCLUDED_INFO_IN_YAML:
+ value = getattr(self, key)
+ if hasattr(value, "_to_yaml_list"): # Features, SplitDict
+ yaml_dict[key] = value._to_yaml_list()
+ elif hasattr(value, "_to_yaml_string"): # Version
+ yaml_dict[key] = value._to_yaml_string()
+ else:
+ yaml_dict[key] = value
+ return yaml_dict
+
+ @classmethod
+ def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
+ yaml_data = copy.deepcopy(yaml_data)
+ if yaml_data.get("features") is not None:
+ yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
+ if yaml_data.get("splits") is not None:
+ yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
+
+
+class DatasetInfosDict(Dict[str, DatasetInfo]):
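+ """A mapping of config names to [`DatasetInfo`] objects.
+
+ It can be dumped to (and loaded from) the YAML header of a dataset card (README.md) and, for
+ backward compatibility, to the legacy `dataset_infos.json` file.
+
+ Illustrative sketch (the repository path is hypothetical):
+
+ ```py
+ >>> infos = DatasetInfosDict({"default": DatasetInfo(description="my dataset")})
+ >>> infos.write_to_directory("/path/to/dataset/repo", pretty_print=True)
+ ```
+ """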
+ def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
+ total_dataset_infos = {}
+ dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
+ dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
+ if not overwrite:
+ total_dataset_infos = self.from_directory(dataset_infos_dir)
+ total_dataset_infos.update(self)
+ if os.path.exists(dataset_infos_path):
+ # for backward compatibility, let's update the JSON file if it exists
+ with open(dataset_infos_path, "w", encoding="utf-8") as f:
+ dataset_infos_dict = {
+ config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
+ }
+ json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
+ # Dump the infos in the YAML part of the README.md file
+ if os.path.exists(dataset_readme_path):
+ dataset_card = DatasetCard.load(dataset_readme_path)
+ dataset_card_data = dataset_card.data
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ if total_dataset_infos:
+ total_dataset_infos.to_dataset_card_data(dataset_card_data)
+ dataset_card = (
+ DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
+ )
+ dataset_card.save(Path(dataset_readme_path))
+
+ @classmethod
+ def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
+ logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
+ # Load the info from the YAML part of README.md
+ if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
+ dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
+ if "dataset_info" in dataset_card_data:
+ return cls.from_dataset_card_data(dataset_card_data)
+ if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
+ # this is just to have backward compatibility with dataset_infos.json files
+ with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ return cls(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ else:
+ return cls()
+
+ @classmethod
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
+ if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
+ if isinstance(dataset_card_data["dataset_info"], list):
+ return cls(
+ {
+ dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
+ dataset_info_yaml_dict
+ )
+ for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
+ }
+ )
+ else:
+ dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
+ dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
+ return cls({dataset_info.config_name: dataset_info})
+ else:
+ return cls()
+
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+ if self:
+ # first get existing metadata info
+ if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
+ dataset_metadata_infos = {
+ dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
+ }
+ elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
+ dataset_metadata_infos = {
+ config_metadata["config_name"]: config_metadata
+ for config_metadata in dataset_card_data["dataset_info"]
+ }
+ else:
+ dataset_metadata_infos = {}
+ # update/rewrite existing metadata info with the one to dump
+ total_dataset_infos = {
+ **dataset_metadata_infos,
+ **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
+ }
+ # the config_name keys of this DatasetInfosDict take precedence over the config_name stored in each DatasetInfo
+ for config_name, dset_info_yaml_dict in total_dataset_infos.items():
+ dset_info_yaml_dict["config_name"] = config_name
+ if len(total_dataset_infos) == 1:
+ # use a struct instead of a list of configurations, since there's only one
+ dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
+ config_name = dataset_card_data["dataset_info"].pop("config_name", None)
+ if config_name != "default":
+ # if config_name is not "default" preserve it and put at the first position
+ dataset_card_data["dataset_info"] = {
+ "config_name": config_name,
+ **dataset_card_data["dataset_info"],
+ }
+ else:
+ dataset_card_data["dataset_info"] = []
+ for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
+ # add the config_name field in first position
+ dataset_info_yaml_dict.pop("config_name", None)
+ dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
+ dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
+
+
+@dataclass
+class MetricInfo:
+ """Information about a metric.
+
+ `MetricInfo` documents a metric, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Note: Not all fields are known on construction and may be updated later.
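+
+ A minimal illustrative construction (the feature names are arbitrary):
+
+ ```py
+ >>> from datasets import Features, MetricInfo, Value
+ >>> info = MetricInfo(
+ ... description="my metric",
+ ... citation="",
+ ... features=Features({"predictions": Value("int32"), "references": Value("int32")}),
+ ... )
+ ```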
+ """
+
+ # Set in the dataset scripts
+ description: str
+ citation: str
+ features: Features
+ inputs_description: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ codebase_urls: List[str] = dataclasses.field(default_factory=list)
+ reference_urls: List[str] = dataclasses.field(default_factory=list)
+ streamable: bool = False
+ format: Optional[str] = None
+
+ # Set later by the builder
+ metric_name: Optional[str] = None
+ config_name: Optional[str] = None
+ experiment_id: Optional[str] = None
+
+ def __post_init__(self):
+ if self.format is not None:
+ for key, value in self.features.items():
+ if not isinstance(value, Value):
+ raise ValueError(
+ f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
+ f"Here {key} is an instance of {value.__class__.__name__}"
+ )
+
+ def write_to_directory(self, metric_info_dir, pretty_print=False):
+ """Write `MetricInfo` as JSON to `metric_info_dir`.
+ Also save the license separately in LICENSE.
+ If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
+ json.dump(asdict(self), f, indent=4 if pretty_print else None)
+
+ if self.license:
+ with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
+ f.write(self.license)
+
+ @classmethod
+ def from_directory(cls, metric_info_dir) -> "MetricInfo":
+ """Create MetricInfo from the JSON file in `metric_info_dir`.
+
+ Args:
+ metric_info_dir: `str` The directory containing the metadata file. This
+ should be the root directory of a specific metric version.
+
+ Example:
+
+ ```py
+ >>> from datasets import MetricInfo
+ >>> metric_info = MetricInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ logger.info(f"Loading Metric info from {metric_info_dir}")
+ if not metric_info_dir:
+ raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
+
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
+ metric_info_dict = json.load(f)
+ return cls.from_dict(metric_info_dict)
+
+ @classmethod
+ def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
diff --git a/venv/lib/python3.10/site-packages/datasets/iterable_dataset.py b/venv/lib/python3.10/site-packages/datasets/iterable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab03b4f486a8a66ed2be6bf53c7444cd08b32494
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/iterable_dataset.py
@@ -0,0 +1,2389 @@
+import copy
+import itertools
+import sys
+import warnings
+from collections import Counter
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from itertools import cycle, islice
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+
+import fsspec.asyn
+import numpy as np
+import pyarrow as pa
+
+from . import config
+from .arrow_dataset import Dataset, DatasetInfoMixin
+from .features import Features
+from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
+from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
+from .info import DatasetInfo
+from .splits import NamedSplit
+from .table import cast_table_to_features, read_schema_from_file, table_cast
+from .utils.logging import get_logger
+from .utils.py_utils import Literal
+from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
+
+
+logger = get_logger(__name__)
+
+Key = Union[int, str]
+
+
+def identity_func(x):
+ return x
+
+
+def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
+ if any(col not in example for col in column_mapping):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+ )
+ if any(col in example for col in column_mapping.values()):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
+ )
+ return {
+ new_column_name: example[original_column_name]
+ for original_column_name, new_column_name in column_mapping.items()
+ }
+
+
+def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
+ if name in example:
+ raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+ return {name: column[idx]}
+
+
+def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
+ pa_table = pa.Table.from_pydict(batch)
+ if try_features is not None:
+ try:
+ pa_table = table_cast(pa_table, pa.schema(try_features.type))
+ except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
+ pass
+ return Features.from_arrow_schema(pa_table.schema)
+
+
+def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
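+ """Convert a list of example dicts into a batch dict of columns, e.g. [{"a": 1}, {"a": 2}] -> {"a": [1, 2]}. Missing values are filled with None."""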
+ # we order the columns by order of appearance
+ # to do so, we use a dict as an ordered set
+ cols = {col: None for example in examples for col in example}
+ # when an example is missing a column, we set the value to None with .get()
+ arrays = [[example.get(col) for example in examples] for col in cols]
+ return dict(zip(cols, arrays))
+
+
+def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]:
+ """Convert a batch (dict of column lists) into an iterator over examples (dicts)."""
+ n_examples = len(batch[next(iter(batch))])
+ for i in range(n_examples):
+ yield {col: array[i] for col, array in batch.items()}
+
+
+class _HasNextIterator(Iterator):
+ """Iterator with an hasnext() function. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
+
+ def __init__(self, it):
+ self.it = iter(it)
+ self._hasnext = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._hasnext:
+ result = self._thenext
+ else:
+ result = next(self.it)
+ self._hasnext = None
+ return result
+
+ def hasnext(self):
+ if self._hasnext is None:
+ try:
+ self._thenext = next(self.it)
+ except StopIteration:
+ self._hasnext = False
+ else:
+ self._hasnext = True
+ return self._hasnext
+
+
+def _convert_to_arrow(
+ iterable: Iterable[Tuple[Key, dict]],
+ batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Convert and group examples in Arrow tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, dict]]`):
+ An examples iterable containing tuples (example_key, example) of type (int/str, dict)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
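+
+ Illustrative sketch of the grouping behaviour (keys of grouped examples are joined with "_"):
+
+ ```py
+ >>> examples = [(0, {"a": 1}), (1, {"a": 2}), (2, {"a": 3})]
+ >>> [(key, table.num_rows) for key, table in _convert_to_arrow(examples, batch_size=2)]
+ [('0_1', 2), ('2', 1)]
+ ```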
+ """
+ if batch_size is None or batch_size <= 0:
+ yield (
+ "all",
+ pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)),
+ )
+ return
+ iterator = iter(iterable)
+ for key, example in iterator:
+ iterator_batch = islice(iterator, batch_size - 1)
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ if len(key_examples_list) < batch_size and drop_last_batch:
+ return
+ keys, examples = zip(*key_examples_list)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
+
+
+def _batch_arrow_tables(
+ iterable: Iterable[Tuple[Key, pa.Table]],
+ batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, pa.Table]]`):
+ A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
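+
+ Illustrative sketch (two 3-row tables re-grouped into 2-row tables):
+
+ ```py
+ >>> tables = [("t0", pa.table({"a": [1, 2, 3]})), ("t1", pa.table({"a": [4, 5, 6]}))]
+ >>> [table.num_rows for _, table in _batch_arrow_tables(tables, batch_size=2)]
+ [2, 2, 2]
+ ```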
+ """
+ if batch_size is None or batch_size <= 0:
+ yield "all", pa.concat_tables([pa_table for _, pa_table in iterable])
+ return
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for key, pa_table in iterable:
+ for chunk in pa_table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+
+
+class _BaseExamplesIterable:
+ """Base class for the examples iterable used by an IterableDataset"""
+
+ def __init__(self) -> None:
+ self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None
+
+ def __iter__(self) -> Iterator[Tuple[Key, dict]]:
+ """An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
+ raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
+ """
+ Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
+ If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
+ """
+ raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
+
+ def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]:
+ return list(range(worker_id, self.n_shards, num_workers))
+
+ @property
+ def n_shards(self) -> int:
+ raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
+
+
+class ExamplesIterable(_BaseExamplesIterable):
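+ """Wraps a `generate_examples`-style function and its keyword arguments.
+
+ Minimal illustrative sketch (the generator function below is made up for the example):
+
+ ```py
+ >>> def gen(n):
+ ... for i in range(n):
+ ... yield i, {"id": i}
+ ...
+ >>> list(ExamplesIterable(gen, {"n": 2}))
+ [(0, {'id': 0}), (1, {'id': 1})]
+ ```
+ """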
+ def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
+ super().__init__()
+ self.generate_examples_fn = generate_examples_fn
+ self.kwargs = kwargs
+
+ def __iter__(self):
+ yield from self.generate_examples_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
+ return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
+ def __init__(
+ self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
+ ):
+ super().__init__(generate_examples_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class ArrowExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
+ super().__init__()
+ self.generate_tables_fn = generate_tables_fn
+ self.kwargs = kwargs
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**self.kwargs):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ yield from self.generate_tables_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
+ return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
+ def __init__(
+ self,
+ generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
+ kwargs: dict,
+ generator: np.random.Generator,
+ ):
+ super().__init__(generate_tables_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_tables_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class SelectColumnsIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.column_names = column_names
+ if self.ex_iterable.iter_arrow:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for idx, row in self.ex_iterable:
+ yield idx, {c: row[c] for c in self.column_names}
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ for idx, pa_table in self.ex_iterable.iter_arrow():
+ yield idx, pa_table.select(self.column_names)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names)
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class StepExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.step = step
+ self.offset = offset
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterator = iter(self.ex_iterable)
+ while True:
+ batch = list(islice(ex_iterator, self.step))
+ if len(batch) > self.offset:
+ yield batch[self.offset]
+ else:
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ self.stopping_strategy = stopping_strategy
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
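+ # e.g. when cycling over a 2-example source and a 3-example source:
+ # - "first_exhausted" (np.any) breaks right after the 2-example source yields its last example,
+ # - "all_exhausted" (np.all) re-opens exhausted sources and only breaks once the 3-example source
+ # has yielded its last example, so examples of the smaller source can be seen more than once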
+ # TODO(QL): implement iter_arrow
+
+ def _get_indices_iterator(self):
+ # this is an infinite iterator to keep track of which iterator we want to pick examples from
+ return cycle(range(len(self.ex_iterables)))
+
+ def __iter__(self):
+ iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
+
+ indices_iterator = self._get_indices_iterator()
+
+ is_exhausted = np.full(len(self.ex_iterables), False)
+ for i in indices_iterator:
+ try: # let's pick one example from the iterator at index i
+ yield next(iterators[i])
+
+ # it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
+ if not iterators[i].hasnext():
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criterion is met, break the main for loop
+ break
+ # otherwise reinitialise the iterator and yield the first example
+ iterators[i] = _HasNextIterator(self.ex_iterables[i])
+
+ except StopIteration:
+ # here it means that the i-th iterable dataset is empty, i.e. we never get to yield an element of the i-th dataset.
+ # we still check whether the stopping criterion is met, and break out of the loop if needed (this matters for the oversampling strategy)
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criterion is met, break the main for loop
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
+ """Shuffle each underlying examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return CyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ stopping_strategy=self.stopping_strategy,
+ )
+
+
+class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
+ It doesn't require the examples iterables to always yield the same columns.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables):
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable
+
+ def _iter_arrow(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable.iter_arrow()
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Shuffle the list of examples iterable, as well as each underlying examples iterable."""
+ rng = deepcopy(generator)
+ ex_iterables = list(self.ex_iterables)
+ rng.shuffle(ex_iterables)
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(
+ f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
+ )
+
+
+class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ HorizontallyConcatenatedMultiSourcesExamplesIterable merges the examples from the input list of iterables together.
+ It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
+ This check is done once when yielding the first example.
+
+ However it doesn't fill missing columns with None.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
+ for i in itertools.count():
+ keys = []
+ examples = []
+ for ex_iterator in list(ex_iterators):
+ try:
+ key, example = next(ex_iterator)
+ keys.append(key)
+ examples.append(example)
+ except StopIteration:
+ ex_iterators.remove(ex_iterator)
+ if ex_iterators:
+ if i == 0:
+ _check_column_names([column_name for example in examples for column_name in example])
+ new_example = {}
+ for example in examples:
+ new_example.update(example)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, new_example
+ else:
+ break
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return 1
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return HorizontallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ generator: np.random.Generator,
+ probabilities: Optional[List[float]] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__(ex_iterables, stopping_strategy)
+ self.generator = deepcopy(generator)
+ self.probabilities = probabilities
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(
+ rng: np.random.Generator,
+ num_sources: int,
+ random_batch_size=1000,
+ p: Optional[List[float]] = None,
+ ) -> Iterator[int]:
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ if p is None:
+ while True:
+ yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
+ else:
+ while True:
+ yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
+
+ def _get_indices_iterator(self):
+ rng = deepcopy(self.generator)
+ # this is an infinite iterator that randomly samples the index of the source to pick examples from
+ return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Shuffle the data sources of each wrapped examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables,
+ generator=generator,
+ probabilities=self.probabilities,
+ stopping_strategy=self.stopping_strategy,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ self.generator,
+ self.probabilities,
+ self.stopping_strategy,
+ )
+
+
+class MappedExamplesIterable(_BaseExamplesIterable):
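+ """Wraps an examples iterable and applies `function` on the fly while iterating, similarly to `Dataset.map` but lazy.
+
+ Minimal illustrative sketch (`base` stands for any `_BaseExamplesIterable` whose examples have a "text" column):
+
+ ```py
+ >>> mapped = MappedExamplesIterable(base, function=lambda ex: {"text": ex["text"].upper()})
+ ```
+
+ The function output is merged back into each example (and `remove_columns` are dropped afterwards).
+ """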
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.drop_last_batch = drop_last_batch
+ self.remove_columns = remove_columns
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ if (
+ self.drop_last_batch
+ and self.batch_size is not None
+ and self.batch_size > 0
+ and len(examples) < self.batch_size
+ ): # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then apply the transform
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ transformed_batch = dict(batch) # this will be updated with the function output
+ transformed_batch.update(self.function(*function_args, **self.fn_kwargs))
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_batch[c]
+ if transformed_batch:
+ first_col = next(iter(transformed_batch))
+ bad_cols = [
+ col
+ for col in transformed_batch
+ if len(transformed_batch[col]) != len(transformed_batch[first_col])
+ ]
+ if bad_cols:
+ raise ValueError(
+ f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
+ )
+ # the new key is the concatenation of the examples keys from the batch
+ new_key = "_".join(str(key) for key in keys)
+ # yield one example at a time from the transformed batch
+ for example in _batch_to_examples(transformed_batch):
+ yield new_key, example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the transform and yield the example directly
+ # first copy the example, since we might drop some keys
+ example = dict(example)
+ example = format_dict(example) if format_dict else example
+ # then apply the transform
+ inputs = example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ transformed_example = dict(example) # this will be updated with the function output
+ transformed_example.update(self.function(*function_args, **self.fn_kwargs))
+ # then we remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_example[c]
+ yield key, transformed_example
+ current_idx += 1
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(),
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ else:
+ iterator = _convert_to_arrow(
+ self.ex_iterable,
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ output_table = self.function(*function_args, **self.fn_kwargs)
+ if not isinstance(output_table, pa.Table):
+ raise TypeError(
+ f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset."
+ )
+ # we don't need to merge results for consistency with Dataset.map which merges iif both input and output are dicts
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for column in self.remove_columns:
+ if column in output_table.column_names:
+ output_table = output_table.remove_column(output_table.column_names.index(column))
+ # return output
+ yield key, output_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
+ """Keep only the requested shard."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class FilteredExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch, if `batch_size` is None or <=0, then the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then compute the mask for the batch
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield one example at a time from the batch
+ for key_example, to_keep in zip(key_examples_list, mask):
+ if to_keep:
+ yield key_example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the filtering function directly
+ example = dict(example)
+ inputs = format_dict(example) if format_dict else example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ to_keep = self.function(*function_args, **self.fn_kwargs)
+ if to_keep:
+ yield key, example
+ current_idx += 1
+
+ def _iter_arrow(self):
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
+ )
+ else:
+ iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield the filtered table
+ if self.batched:
+ yield key, pa_table.filter(mask)
+ elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
+ yield key, pa_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "FilteredExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
+ """Keep only the requested shard."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class BufferShuffledExamplesIterable(_BaseExamplesIterable):
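+ """Approximately shuffles an examples iterable using a fixed-size buffer.
+
+ Examples first fill a buffer of `buffer_size` items; once it is full, each incoming example replaces
+ a randomly chosen buffered example, which is yielded instead. A larger buffer gives a more uniform
+ shuffle at the cost of memory.
+ """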
+ def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.buffer_size = buffer_size
+ self.generator = generator
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
+ while True:
+ yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
+
+ def __iter__(self):
+ buffer_size = self.buffer_size
+ rng = deepcopy(self.generator)
+ indices_iterator = self._iter_random_indices(rng, buffer_size)
+ # this is the shuffle buffer that we keep in memory
+ mem_buffer = []
+ for x in self.ex_iterable:
+ if len(mem_buffer) == buffer_size: # if the buffer is full, pick and example from it
+ i = next(indices_iterator)
+ yield mem_buffer[i]
+ mem_buffer[i] = x # replace the picked example by a new one
+ else: # otherwise, keep filling the buffer
+ mem_buffer.append(x)
+ # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
+ rng.shuffle(mem_buffer)
+ yield from mem_buffer
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
+ """Shuffle the wrapped examples iterable as well as the shuffling buffer."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
+ """Keep only the requested shard."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ buffer_size=self.buffer_size,
+ generator=self.generator,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class SkipExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n, None)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class TakeExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
+ return self
+
+ @staticmethod
+ def split_number(num, n):
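+ """Split `num` into `n` integer parts that differ by at most 1, e.g. `split_number(10, 3) == [4, 3, 3]`."""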
+ quotient = num // n
+ remainder = num % n
+ result = [quotient] * n
+ for i in range(remainder):
+ result[i] += 1
+ return result
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable":
+ """Keep only the requested shard."""
+ return TakeExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ n=self.split_number(self.n, num_workers)[worker_id],
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+def _apply_feature_types_on_example(
+ example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ example = dict(example)
+ # add missing columns
+ for column_name in features:
+ if column_name not in example:
+ example[column_name] = None
+ # we encode the example for ClassLabel feature types for example
+ encoded_example = features.encode_example(example)
+ # Decode example for Audio feature, e.g.
+ decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
+ return decoded_example
+
+
+def _apply_feature_types_on_batch(
+ batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ batch = dict(batch)
+ # add missing columns
+ n_examples = len(batch[next(iter(batch))])
+ for column_name in features:
+ if column_name not in batch:
+ batch[column_name] = [None] * n_examples
+ # we encode the batch for ClassLabel feature types for example
+ encoded_batch = features.encode_batch(batch)
+ # Decode batch for Audio feature, e.g.
+ decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id)
+ return decoded_batch
+
+
+class TypedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ features: Features,
+ token_per_repo_id: Dict[str, Union[str, bool, None]],
+ ):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.features = features
+ self.token_per_repo_id = token_per_repo_id
+ if self.ex_iterable.iter_arrow is not None:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ # Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ for key, example in self.ex_iterable:
+ yield (
+ key,
+ _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id),
+ )
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ schema = self.features.arrow_schema
+ for key, pa_table in self.ex_iterable.iter_arrow():
+ columns = set(pa_table.column_names)
+ # add missing columns
+ for column_name in self.features:
+ if column_name not in columns:
+ col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
+ pa_table = pa_table.append_column(column_name, col)
+ if pa_table.schema != schema:
+ pa_table = cast_table_to_features(pa_table, self.features)
+ yield key, pa_table
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable":
+ """Keep only the requested shard."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+@dataclass
+class FormattingConfig:
+ format_type: Optional[str]
+
+ def __post_init__(self):
+ if self.format_type == "pandas":
+ raise NotImplementedError(
+ "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
+ )
+
+
+@dataclass
+class ShufflingConfig:
+ generator: np.random.Generator
+ _original_seed: Optional[int] = None
+
+
+@dataclass
+class DistributedConfig:
+ rank: int
+ world_size: int
+
+
+def _maybe_add_torch_iterable_dataset_parent_class(cls):
+ """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available"""
+ if config.TORCH_AVAILABLE:
+ import torch.utils.data
+
+ if torch.utils.data.IterableDataset not in cls.__bases__:
+ cls.__bases__ += (torch.utils.data.IterableDataset,)
+
+
+class IterableDataset(DatasetInfoMixin):
+ """A Dataset backed by an iterable."""
+
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ formatting: Optional[FormattingConfig] = None,
+ shuffling: Optional[ShufflingConfig] = None,
+ distributed: Optional[DistributedConfig] = None,
+ token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
+ format_type="deprecated",
+ ):
+ if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
+ raise RuntimeError(
+ "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
+ "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
+ )
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+
+ self._ex_iterable = ex_iterable
+ self._formatting = formatting
+ self._shuffling = shuffling
+ self._distributed = distributed
+ self._epoch = 0
+ self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def __repr__(self):
+ return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})"
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def _head(self, n=5):
+ return _examples_to_batch(list(self.take(n)))
+
+ def _effective_generator(self):
+ if self._shuffling and self._epoch == 0:
+ return self._shuffling.generator
+ elif self._shuffling:
+ # Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
+ effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
+ effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
+ return np.random.default_rng(effective_seed)
+ else:
+ raise ValueError("This dataset is not shuffled")
+
+ @property
+ def n_shards(self) -> int:
+ if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
+ return self._ex_iterable.n_shards // self._distributed.world_size
+ return self._ex_iterable.n_shards
+
+ def _iter_pytorch(self):
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0)
+ # See https://github.com/fsspec/gcsfs/issues/379
+ fsspec.asyn.reset_lock()
+ # check if there aren't too many workers
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
+ logger.warning(
+ f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
+ f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
+ )
+ logger.info(
+ f"To parallelize data loading, we give each process some shards (or data sources) to process. "
+ f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
+ f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
+ )
+ # split workload
+ _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
+ shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
+ if shards_indices:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+ else:
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ else:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
+ )
+
+ def _is_main_process(self):
+ if self._distributed and self._distributed.rank > 0:
+ return False
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if worker_info is not None and worker_info.id > 0:
+ return False
+ return True
+
+ def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
+ if self._shuffling:
+ ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
+ else:
+ ex_iterable = self._ex_iterable
+
+ if self._distributed:
+ rank = self._distributed.rank
+ world_size = self._distributed.world_size
+ if ex_iterable.n_shards % world_size == 0:
+ if self._is_main_process():
+ n_shards_per_node = ex_iterable.n_shards // world_size
+ plural = "s" if n_shards_per_node > 1 else ""
+ logger.info(
+ f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
+ else:
+ if self._is_main_process():
+ logger.info(
+ f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
+ )
+ logger.info(
+ f"It is more optimized to distribute the dataset shards (or data sources) across nodes. "
+ f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. "
+ f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}"
+ )
+ ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
+
+ return ex_iterable
+
+ def __iter__(self):
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
+ # We're a torch.utils.data.IterableDataset in a PyTorch worker process
+ yield from self._iter_pytorch()
+ return
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ Args:
+ batch_size (:obj:`int`): size of each batch to yield.
+ drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be
+ dropped
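+
+ Example (a minimal sketch; `gen` below is a placeholder generator, not part of the library):
+
+ ```py
+ >>> from datasets import IterableDataset
+ >>> def gen():
+ ... yield {"a": 1}
+ ... yield {"a": 2}
+ ... yield {"a": 3}
+ >>> ds = IterableDataset.from_generator(gen)
+ >>> list(ds.iter(batch_size=2))
+ [{'a': [1, 2]}, {'a': [3]}]
+ ```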
+ """
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch
+ )
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
+ for key, pa_table in iterator:
+ yield formatter.format_batch(pa_table)
+ return
+
+ iterator = iter(ex_iterable)
+ for key, example in iterator:
+ # If batched, first build the batch
+ examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
+ if drop_last_batch and len(examples) < batch_size: # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_batch`.
+ batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
+ yield format_dict(batch) if format_dict else batch
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ gen_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Create an Iterable Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features (`Features`, *optional*):
+ Dataset features.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
+ This can be used to improve shuffling and when iterating over the dataset with multiple workers.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = IterableDataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
+ >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ gen_kwargs=gen_kwargs,
+ streaming=True,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ **kwargs,
+ ) -> "IterableDataset":
+ """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+
+ Returns:
+ [`IterableDataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ >>> columns=["id", "name"],
+ >>> )
+ >>> ds = IterableDataset.from_spark(df)
+ ```
+ """
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=True,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_file(filename: str) -> "IterableDataset":
+ """Instantiate a IterableDataset from Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+
+ Returns:
+ [`IterableDataset`]
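+
+ Example (illustrative; the path below is a placeholder for an existing Arrow cache file):
+
+ ```py
+ >>> ds = IterableDataset.from_file("path/to/cache-file.arrow")
+ ```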
+ """
+ pa_table_schema = read_schema_from_file(filename)
+ inferred_features = Features.from_arrow_schema(pa_table_schema)
+ ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
+ return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDataset":
+ """
+ Return a dataset with the specified format.
+ Supported formats: "arrow", or None for regular python objects.
+ The other formats are currently not implemented.
+
+ Args:
+
+ type (`str`, optional, default None): if set to "torch", the returned dataset
+ will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader
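+
+ Example (illustrative sketch):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.with_format("arrow")
+ >>> next(iter(ds))  # a pyarrow.Table containing a single row
+ ```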
+ """
+ type = get_format_type_from_alias(type)
+ # TODO(QL): add format_kwargs
+ # TODO(QL): add format_columns and return_all_columns
+ # TODO(QL): add pandas format
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=FormattingConfig(format_type=type),
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ features: Optional[Features] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`[List[str]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ features (`[Features]`, *optional*, defaults to `None`):
+ Feature types of the resulting dataset.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+ if function is None:
+ function = identity_func
+ if fn_kwargs is None:
+ fn_kwargs = {}
+ ex_iterable = MappedExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, default `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds.take(3))
+ [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
+ info = copy.deepcopy(self._info)
+ info.features = None
+
+ # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+ ex_iterable = FilteredExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDataset":
+ """
+ Randomly shuffles the elements of this dataset.
+
+ This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> list(shuffled_ds.take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ if generator is None:
+ generator = np.random.default_rng(seed)
+ else:
+ generator = deepcopy(generator)
+ shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
+ return IterableDataset(
+ ex_iterable=BufferShuffledExamplesIterable(
+ self._ex_iterable, buffer_size=buffer_size, generator=generator
+ ).shuffle_data_sources(generator),
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=shuffling,
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def set_epoch(self, epoch: int):
+ self._epoch = epoch
+
+ def skip(self, n: int) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def take(self, n: int) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ @property
+ def column_names(self) -> Optional[List[str]]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return list(self._info.features.keys()) if self._info.features is not None else None
+
+ def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
+ """Add column to Dataset.
+
+ Args:
+ name (str): Column name.
+ column (list or np.array): Column data to be added.
+
+ Returns:
+ `IterableDataset`
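+
+ Example (a minimal sketch; `gen` is a placeholder generator, not part of the library):
+
+ ```py
+ >>> from datasets import IterableDataset
+ >>> def gen():
+ ... yield {"text": "Good"}
+ ... yield {"text": "Bad"}
+ >>> ds = IterableDataset.from_generator(gen)
+ >>> ds = ds.add_column("label", [0, 1])
+ >>> next(iter(ds))
+ {'text': 'Good', 'label': 0}
+ ```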
+ """
+ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return self.rename_columns({original_column_name: new_column_name})
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with renamed columns
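+
+ Example (illustrative sketch):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> next(iter(ds))  # the example now uses the keys "movie_review" and "rating"
+ ```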
+ """
+
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(
+ partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
+ )
+ if original_features is not None:
+ ds_iterable._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping.keys() else col: feature
+ for col, feature in original_features.items()
+ }
+ )
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+ return ds_iterable
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(remove_columns=column_names)
+ if original_features is not None:
+ ds_iterable._info.features = original_features.copy()
+ for col, _ in original_features.items():
+ if col in column_names:
+ del ds_iterable._info.features[col]
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+
+ return ds_iterable
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to select.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object with selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ if self._info:
+ info = copy.deepcopy(self._info)
+ if self._info.features is not None:
+ missing_columns = set(column_names) - set(self._info.features.keys())
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Columns in the dataset: "
+ f"{list(self._info.features.keys())}."
+ )
+ info.features = Features({c: info.features[c] for c in column_names})
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+
+ ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=self._shuffling,
+ distributed=self._distributed,
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`Feature`):
+ Target feature.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
+ >>> ds.features
+ {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds.features
+ {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features[column] = feature
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features["label"] = ClassLabel(names=["bad", "good"])
+ >>> new_features["text"] = Value("large_string")
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features = features
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _step(self, step: int, offset: int) -> "IterableDataset":
+ ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _resolve_features(self):
+ if self.features is not None:
+ return self
+ elif isinstance(self._ex_iterable, TypedExamplesIterable):
+ features = self._ex_iterable.features
+ else:
+ features = _infer_features_from_batch(self.with_format(None)._head())
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+
+def _concatenate_iterable_datasets(
+ dsets: List[IterableDataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> IterableDataset:
+ """
+ Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`.
+ Missing data are filled with None values.
+
+
+
+ Args:
+ dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
+ ```
+ """
+ dsets = [d._resolve_features() for d in dsets]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ _check_column_names([col_name for dset in dsets for col_name in dset.features])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in dsets]
+ if axis == 0:
+ ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ else:
+ ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in dsets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _interleave_iterable_datasets(
+ datasets: List[IterableDataset],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> IterableDataset:
+ """
+ Interleave several iterable datasets (sources) into a single iterable dataset.
+ The new iterable dataset alternates between the sources to yield examples.
+ If `probabilities` is `None` (default), the iterable dataset cycles through the sources in order to yield each next example in the iteration.
+ If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each next example in the iteration.
+
+
+
+ Args:
+ datasets (`List[IterableDataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset runs out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+
+ Output:
+ `datasets.IterableDataset`
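+
+ Example (illustrative; `ds1` and `ds2` are assumed to be existing `IterableDataset` objects):
+
+ ```py
+ >>> ds3 = _interleave_iterable_datasets([ds1, ds2], seed=42)
+ >>> ds4 = _interleave_iterable_datasets([ds1, ds2], probabilities=[0.7, 0.3], seed=42)
+ ```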
+ """
+ datasets = [d._resolve_features() for d in datasets]
+
+ # Perform checks
+ _check_if_features_can_be_aligned([dset.features for dset in datasets])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in datasets]
+
+ # Use cycling or random cycling of sources
+ if probabilities is None:
+ ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
+ else:
+ generator = np.random.default_rng(seed)
+ ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
+ )
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in datasets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {
+ repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
+ }
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
+ """
+ Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most optimized.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`IterableDataset`]):
+ The iterable dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
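+
+ Example (illustrative; `ds` is assumed to be an existing `IterableDataset`):
+
+ ```py
+ >>> ds_rank0 = _split_by_node_iterable_dataset(ds, rank=0, world_size=8)
+ ```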
+ """
+ if dataset._distributed:
+ world_size = world_size * dataset._distributed.world_size
+ rank = world_size * dataset._distributed.rank + rank
+ distributed = DistributedConfig(rank=rank, world_size=world_size)
+ return IterableDataset(
+ ex_iterable=dataset._ex_iterable,
+ info=dataset._info.copy(),
+ split=dataset._split,
+ formatting=dataset._formatting,
+ shuffling=copy.deepcopy(dataset._shuffling),
+ distributed=distributed,
+ token_per_repo_id=dataset._token_per_repo_id,
+ )
diff --git a/venv/lib/python3.10/site-packages/datasets/keyhash.py b/venv/lib/python3.10/site-packages/datasets/keyhash.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c75fcfd7ffb300aac1ffd0fc822287f21b56f8a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/keyhash.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+"""
+Hashing function for dataset keys using `hashlib.md5`
+
+Requirements for the hash function:
+
+- Provides a uniformly distributed hash from random space
+- Adequately fast speed
+- Working with multiple input types (in this case, `str`, `int` or `bytes`)
+- Should be platform independent (generates same hash on different OS and systems)
+
+The hashing function provides a unique 128-bit integer hash of the key provided.
+
+ The split name is used here as the hash salt to avoid producing the same hash
+ in different splits for identical keys.
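+
+ Example (illustrative sketch):
+
+ >>> hasher = KeyHasher(hash_salt="train")
+ >>> hasher.hash("some-key")  # deterministic 128-bit integer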
+"""
+
+from typing import Union
+
+from huggingface_hub.utils import insecure_hashlib
+
+
+def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+ """
+ Returns the input hash_data in its bytes form
+
+ Args:
+ hash_data: the hash salt/key to be converted to bytes
+ """
+ if isinstance(hash_data, bytes):
+ # Data already in bytes, return it as is
+ return hash_data
+ elif isinstance(hash_data, str):
+ # We keep the data as it is for it to be encoded to UTF-8 later
+ # However replace `\\` with `/` for Windows compatibility
+ hash_data = hash_data.replace("\\", "/")
+ elif isinstance(hash_data, int):
+ hash_data = str(hash_data)
+ else:
+ # If data is not of the required type, raise error
+ raise InvalidKeyError(hash_data)
+
+ return hash_data.encode("utf-8")
+
+
+class InvalidKeyError(Exception):
+ """Raises an error when given key is of invalid datatype."""
+
+ def __init__(self, hash_data):
+ self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+ self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+ self.suffix = "\nKeys should be either str, int or bytes type"
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class DuplicatedKeysError(Exception):
+ """Raise an error when duplicate key found."""
+
+ def __init__(self, key, duplicate_key_indices, fix_msg=""):
+ self.key = key
+ self.duplicate_key_indices = duplicate_key_indices
+ self.fix_msg = fix_msg
+ self.prefix = "Found multiple examples generated with the same key"
+ if len(duplicate_key_indices) <= 20:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+ else:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
+ self.suffix = "\n" + fix_msg if fix_msg else ""
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class KeyHasher:
+ """KeyHasher class for providing hash using md5"""
+
+ def __init__(self, hash_salt: str):
+ self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
+
+ def hash(self, key: Union[str, int, bytes]) -> int:
+ """Returns 128-bits unique hash of input key
+
+ Args:
+ key: the input key to be hashed (should be str, int or bytes)
+
+ Returns: 128-bit int hash key"""
+ md5 = self._split_md5.copy()
+ byte_key = _as_bytes(key)
+ md5.update(byte_key)
+ # Convert to integer with hexadecimal conversion
+ return int(md5.hexdigest(), 16)
diff --git a/venv/lib/python3.10/site-packages/datasets/load.py b/venv/lib/python3.10/site-packages/datasets/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd7aa401094b57a2cbe433567fe64e36ad775e07
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/load.py
@@ -0,0 +1,2699 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Access datasets."""
+
+import filecmp
+import glob
+import importlib
+import inspect
+import json
+import os
+import posixpath
+import shutil
+import signal
+import time
+import warnings
+from collections import Counter
+from contextlib import nullcontext
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+
+import fsspec
+import requests
+import yaml
+from fsspec.core import url_to_fs
+from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFileSystem
+
+from . import config
+from .arrow_dataset import Dataset
+from .builder import BuilderConfig, DatasetBuilder
+from .data_files import (
+ DEFAULT_PATTERNS_ALL,
+ DataFilesDict,
+ DataFilesList,
+ DataFilesPatternsDict,
+ DataFilesPatternsList,
+ EmptyDatasetError,
+ get_data_patterns,
+ get_metadata_patterns,
+ sanitize_patterns,
+)
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
+from .exceptions import DataFilesNotFoundError, DatasetNotFoundError
+from .features import Features
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict
+from .iterable_dataset import IterableDataset
+from .metric import Metric
+from .naming import camelcase_to_snakecase, snakecase_to_camelcase
+from .packaged_modules import (
+ _EXTENSION_TO_MODULE,
+ _MODULE_SUPPORTS_METADATA,
+ _MODULE_TO_EXTENSIONS,
+ _PACKAGED_DATASETS_MODULES,
+ _hash_python_lines,
+)
+from .splits import Split
+from .utils import _dataset_viewer
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import (
+ OfflineModeIsEnabled,
+ _raise_if_offline_mode_is_enabled,
+ cached_path,
+ head_hf_s3,
+ hf_github_url,
+ init_hf_modules,
+ is_relative_path,
+ relative_to_absolute_path,
+ url_or_path_join,
+)
+from .utils.hub import hf_dataset_url
+from .utils.info_utils import VerificationMode, is_small_dataset
+from .utils.logging import get_logger
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import get_imports, lock_importable_file
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"]
+
+
+def _raise_timeout_error(signum, frame):
+ raise ValueError(
+ "Loading this dataset requires you to execute custom code contained in the dataset repository on your local "
+ "machine. Please set the option `trust_remote_code=True` to permit loading of this dataset."
+ )
+
+
+def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool:
+ """
+ Copied and adapted from Transformers
+ https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589
+ """
+ trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE
+ if trust_remote_code is None:
+ if config.TIME_OUT_REMOTE_CODE > 0:
+ try:
+ signal.signal(signal.SIGALRM, _raise_timeout_error)
+ signal.alarm(config.TIME_OUT_REMOTE_CODE)
+ while trust_remote_code is None:
+ answer = input(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
+ f"Do you wish to run the custom code? [y/N] "
+ )
+ if answer.lower() in ["yes", "y", "1"]:
+ trust_remote_code = True
+ elif answer.lower() in ["no", "n", "0", ""]:
+ trust_remote_code = False
+ signal.alarm(0)
+ except Exception:
+ # OS which does not support signal.SIGALRM
+ raise ValueError(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
+ )
+ else:
+ # For the CI which might put the timeout at 0
+ _raise_timeout_error(None, None)
+ return trust_remote_code
+
+
+def init_dynamic_modules(
+ name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
+):
+ """
+ Create a module with name `name` in which you can add dynamic modules
+ such as metrics or datasets. The module can be imported using its name.
+ The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
+ be overridden by specifying a path to another directory in `hf_modules_cache`.
+ """
+ hf_modules_cache = init_hf_modules(hf_modules_cache)
+ dynamic_modules_path = os.path.join(hf_modules_cache, name)
+ os.makedirs(dynamic_modules_path, exist_ok=True)
+ if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
+ with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
+ pass
+ return dynamic_modules_path
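+
+# Illustrative sketch (not part of the upstream module): create (or reuse) the dynamic
+# modules directory; the exact location depends on the HF modules cache configuration
+# (by default under ~/.cache/huggingface/modules).
+#
+#   >>> dynamic_modules_path = init_dynamic_modules()
+#   >>> os.path.isfile(os.path.join(dynamic_modules_path, "__init__.py"))
+#   True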
+
+
+def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
+ """Import a module at module_path and return its main class:
+ - a DatasetBuilder if dataset is True
+ - a Metric if dataset is False
+ """
+ module = importlib.import_module(module_path)
+
+ if dataset:
+ main_cls_type = DatasetBuilder
+ else:
+ main_cls_type = Metric
+
+ # Find the main class in our imported module
+ module_main_cls = None
+ for name, obj in module.__dict__.items():
+ if inspect.isclass(obj) and issubclass(obj, main_cls_type):
+ if inspect.isabstract(obj):
+ continue
+ module_main_cls = obj
+ obj_module = inspect.getmodule(obj)
+ if obj_module is not None and module == obj_module:
+ break
+
+ return module_main_cls
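+
+# Illustrative sketch (not part of the upstream module): resolve the builder class of a
+# packaged module; the dotted module path is looked up from _PACKAGED_DATASETS_MODULES
+# rather than hard-coded here.
+#
+#   >>> csv_module_path, _ = _PACKAGED_DATASETS_MODULES["csv"]
+#   >>> builder_cls = import_main_class(csv_module_path)
+#   >>> issubclass(builder_cls, DatasetBuilder)
+#   True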
+
+
+class _InitializeConfiguredDatasetBuilder:
+ """
+ From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
+ See also ConfiguredDatasetBuilder.__reduce__
+ When called with the param value as the only argument, returns an
+ un-initialized instance of the parameterized class. Subsequent __setstate__
+ will be called by pickle.
+ """
+
+ def __call__(self, builder_cls, metadata_configs, default_config_name, name):
+ # make a simple object which has no complex __init__ (this one will do)
+ obj = _InitializeConfiguredDatasetBuilder()
+ obj.__class__ = configure_builder_class(
+ builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
+ )
+ return obj
+
+
+def configure_builder_class(
+ builder_cls: Type[DatasetBuilder],
+ builder_configs: List[BuilderConfig],
+ default_config_name: Optional[str],
+ dataset_name: str,
+) -> Type[DatasetBuilder]:
+ """
+ Dynamically create a builder class with custom builder configs parsed from README.md file,
+ i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list.
+ """
+
+ class ConfiguredDatasetBuilder(builder_cls):
+ BUILDER_CONFIGS = builder_configs
+ DEFAULT_CONFIG_NAME = default_config_name
+
+ __module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
+
+        def __reduce__(self):  # to make dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
+ parent_builder_cls = self.__class__.__mro__[1]
+ return (
+ _InitializeConfiguredDatasetBuilder(),
+ (
+ parent_builder_cls,
+ self.BUILDER_CONFIGS,
+ self.DEFAULT_CONFIG_NAME,
+ self.dataset_name,
+ ),
+ self.__dict__.copy(),
+ )
+
+ ConfiguredDatasetBuilder.__name__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+ ConfiguredDatasetBuilder.__qualname__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+
+ return ConfiguredDatasetBuilder
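+
+# Illustrative sketch (not part of the upstream module): attach two hypothetical configs
+# (as would be parsed from a README) to a packaged builder class; `builder_cls` is assumed
+# to come from import_main_class() as above.
+#
+#   >>> configs = [builder_cls.BUILDER_CONFIG_CLASS(name="small"), builder_cls.BUILDER_CONFIG_CLASS(name="full")]
+#   >>> configured_cls = configure_builder_class(builder_cls, configs, default_config_name="full", dataset_name="my_dataset")
+#   >>> configured_cls.DEFAULT_CONFIG_NAME
+#   'full'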
+
+
+def get_dataset_builder_class(
+ dataset_module: "DatasetModule", dataset_name: Optional[str] = None
+) -> Type[DatasetBuilder]:
+ with lock_importable_file(
+ dataset_module.importable_file_path
+ ) if dataset_module.importable_file_path else nullcontext():
+ builder_cls = import_main_class(dataset_module.module_path)
+ if dataset_module.builder_configs_parameters.builder_configs:
+ dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name")
+ if dataset_name is None:
+ raise ValueError("dataset_name should be specified but got None")
+ builder_cls = configure_builder_class(
+ builder_cls,
+ builder_configs=dataset_module.builder_configs_parameters.builder_configs,
+ default_config_name=dataset_module.builder_configs_parameters.default_config_name,
+ dataset_name=dataset_name,
+ )
+ return builder_cls
+
+
+def files_to_hash(file_paths: List[str]) -> str:
+ """
+ Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
+ """
+ # List all python files in directories if directories are supplied as part of external imports
+ to_use_files: List[Union[Path, str]] = []
+ for file_path in file_paths:
+ if os.path.isdir(file_path):
+ to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
+ else:
+ to_use_files.append(file_path)
+
+ # Get the code from all these files
+ lines = []
+ for file_path in to_use_files:
+ with open(file_path, encoding="utf-8") as f:
+ lines.extend(f.readlines())
+ return _hash_python_lines(lines)
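+
+# Illustrative sketch (not part of the upstream module): the hash covers the script and all
+# of its local imports, so editing any of these files produces a new cache subdirectory.
+# The paths below are hypothetical.
+#
+#   >>> files_to_hash(["./my_dataset/my_dataset.py", "./my_dataset/utils.py"])
+#   # -> a hex digest string, used as the subdirectory name in the dynamic modules cache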
+
+
+def increase_load_count(name: str, resource_type: str):
+ """Update the download count of a dataset or metric."""
+ if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
+ try:
+ head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
+ except Exception:
+ pass
+
+
+def _download_additional_modules(
+    name: str, base_path: str, imports: List[Tuple[str, str, str, str]], download_config: Optional[DownloadConfig]
+) -> List[Tuple[str, str]]:
+    """
+    Download the additional modules needed by a module ``<name>.py`` located at ``<base_path>/<name>.py`` (URL or local path).
+    The imports must have been parsed first using ``get_imports``.
+
+    If some modules need to be installed with pip, an error is raised showing how to install them.
+    This function returns the list of downloaded modules as tuples (import_name, module_file_path).
+
+ The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
+ """
+ local_imports = []
+ library_imports = []
+ download_config = download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading extra modules"
+ for import_type, import_name, import_path, sub_directory in imports:
+ if import_type == "library":
+ library_imports.append((import_name, import_path)) # Import from a library
+ continue
+
+ if import_name == name:
+ raise ValueError(
+ f"Error in the {name} script, importing relative {import_name} module "
+ f"but {import_name} is the name of the script. "
+ f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
+ f"comment pointing to the original relative import file path."
+ )
+ if import_type == "internal":
+ url_or_filename = url_or_path_join(base_path, import_path + ".py")
+ elif import_type == "external":
+ url_or_filename = import_path
+ else:
+ raise ValueError("Wrong import_type")
+
+ local_import_path = cached_path(
+ url_or_filename,
+ download_config=download_config,
+ )
+ if sub_directory is not None:
+ local_import_path = os.path.join(local_import_path, sub_directory)
+ local_imports.append((import_name, local_import_path))
+
+ # Check library imports
+ needs_to_be_installed = {}
+ for library_import_name, library_import_path in library_imports:
+ try:
+ lib = importlib.import_module(library_import_name) # noqa F841
+ except ImportError:
+ if library_import_name not in needs_to_be_installed or library_import_path != library_import_name:
+ needs_to_be_installed[library_import_name] = library_import_path
+ if needs_to_be_installed:
+ _dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency"
+ _them_str = "them" if len(needs_to_be_installed) > 1 else "it"
+ if "sklearn" in needs_to_be_installed.keys():
+ needs_to_be_installed["sklearn"] = "scikit-learn"
+ if "Bio" in needs_to_be_installed.keys():
+ needs_to_be_installed["Bio"] = "biopython"
+ raise ImportError(
+ f"To be able to use {name}, you need to install the following {_dependencies_str}: "
+ f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install "
+ f"{' '.join(needs_to_be_installed.values())}' for instance."
+ )
+ return local_imports
+
+
+def _copy_script_and_other_resources_in_importable_dir(
+ name: str,
+ importable_directory_path: str,
+ subdirectory_name: str,
+ original_local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ download_mode: Optional[Union[DownloadMode, str]],
+) -> str:
+ """Copy a script and its required imports to an importable directory
+
+ Args:
+ name (str): name of the resource to load
+ importable_directory_path (str): path to the loadable folder in the dynamic modules directory
+ subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
+ original_local_path (str): local path to the resource script
+ local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
+ additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
+ download_mode (Optional[Union[DownloadMode, str]]): download mode
+
+    Returns:
+        importable_file: path to the copied script inside the importable directory (the module can then be loaded with importlib.import_module)
+ """
+ # Define a directory with a unique name in our dataset or metric folder
+ # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
+ importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
+ importable_file = os.path.join(importable_subdirectory, name + ".py")
+ # Prevent parallel disk operations
+ with lock_importable_file(importable_file):
+ # Create main dataset/metrics folder if needed
+ if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
+ shutil.rmtree(importable_directory_path)
+ os.makedirs(importable_directory_path, exist_ok=True)
+
+ # add an __init__ file to the main dataset folder if needed
+ init_file_path = os.path.join(importable_directory_path, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Create hash dataset folder if needed
+ os.makedirs(importable_subdirectory, exist_ok=True)
+ # add an __init__ file to the hash dataset folder if needed
+ init_file_path = os.path.join(importable_subdirectory, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Copy dataset.py file in hash folder if needed
+ if not os.path.exists(importable_file):
+ shutil.copyfile(original_local_path, importable_file)
+ # Record metadata associating original dataset path with local unique folder
+ # Use os.path.splitext to split extension from importable_local_file
+ meta_path = os.path.splitext(importable_file)[0] + ".json"
+ if not os.path.exists(meta_path):
+ meta = {"original file path": original_local_path, "local file path": importable_file}
+                # the script filename is *.py, so name the metadata file filename.json rather than filename.py.json
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ # Copy all the additional imports
+ for import_name, import_path in local_imports:
+ if os.path.isfile(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
+ if not os.path.exists(full_path_local_import):
+ shutil.copyfile(import_path, full_path_local_import)
+ elif os.path.isdir(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name)
+ if not os.path.exists(full_path_local_import):
+ shutil.copytree(import_path, full_path_local_import)
+ else:
+ raise ImportError(f"Error with local import at {import_path}")
+
+ # Copy additional files like dataset_infos.json file if needed
+ for file_name, original_path in additional_files:
+ destination_additional_path = os.path.join(importable_subdirectory, file_name)
+ if not os.path.exists(destination_additional_path) or not filecmp.cmp(
+ original_path, destination_additional_path
+ ):
+ shutil.copyfile(original_path, destination_additional_path)
+ return importable_file
+
+
+def _get_importable_file_path(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> str:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ return os.path.join(importable_directory_path, subdirectory_name, name.split("/")[-1] + ".py")
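+
+# Illustrative sketch (not part of the upstream module): the importable file lives at
+# <dynamic_modules_path>/<namespace>/<name with "/" replaced by "--">/<subdirectory>/<basename>.py.
+# All values below are hypothetical.
+#
+#   >>> _get_importable_file_path("/cache/modules/datasets_modules", "datasets", "abc123", "user/my_dataset")
+#   '/cache/modules/datasets_modules/datasets/user--my_dataset/abc123/my_dataset.py'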
+
+
+def _create_importable_file(
+ local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+ download_mode: DownloadMode,
+) -> None:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
+ (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
+ importable_local_file = _copy_script_and_other_resources_in_importable_dir(
+ name=name.split("/")[-1],
+ importable_directory_path=importable_directory_path,
+ subdirectory_name=subdirectory_name,
+ original_local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ download_mode=download_mode,
+ )
+ logger.debug(f"Created importable dataset file at {importable_local_file}")
+
+
+def _load_importable_file(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> Tuple[str, str]:
+ module_path = ".".join(
+ [
+ os.path.basename(dynamic_modules_path),
+ module_namespace,
+ name.replace("/", "--"),
+ subdirectory_name,
+ name.split("/")[-1],
+ ]
+ )
+ return module_path, subdirectory_name
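+
+# Illustrative sketch (not part of the upstream module): the returned dotted module path
+# mirrors the directory layout above, so it can be passed to importlib.import_module().
+# All values below are hypothetical.
+#
+#   >>> _load_importable_file("/cache/modules/datasets_modules", "datasets", "abc123", "user/my_dataset")
+#   ('datasets_modules.datasets.user--my_dataset.abc123.my_dataset', 'abc123')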
+
+
+def infer_module_for_data_files_list(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of data files.
+
+ It picks the module based on the most common file extension.
+    In case of a draw, ".parquet" is preferred, and then alphabetical order.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+        download_config ([`DownloadConfig`], *optional*): Download configuration parameters, mainly ``use_auth_token`` or ``storage_options``, to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
+ """
+ extensions_counter = Counter(
+ ("." + suffix.lower(), xbasename(filepath) in ("metadata.jsonl", "metadata.csv"))
+ for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]
+ for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+
+ def sort_key(ext_count: Tuple[Tuple[str, bool], int]) -> Tuple[int, bool]:
+ """Sort by count and set ".parquet" as the favorite in case of a draw, and ignore metadata files"""
+ (ext, is_metadata), count = ext_count
+ return (not is_metadata, count, ext == ".parquet", ext)
+
+ for (ext, _), _ in sorted(extensions_counter.items(), key=sort_key, reverse=True):
+ if ext in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[ext]
+ elif ext == ".zip":
+ return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
+ return None, {}
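+
+# Illustrative sketch (not part of the upstream module): with a majority of ".csv" files the
+# packaged "csv" module is inferred. A plain list of hypothetical paths stands in for a
+# DataFilesList here, since the function only slices and iterates its argument.
+#
+#   >>> infer_module_for_data_files_list(["data/train.csv", "data/test.csv"])
+#   ('csv', {})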
+
+
+def infer_module_for_data_files_list_in_archives(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of archive data files.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+        download_config ([`DownloadConfig`], *optional*): Download configuration parameters, mainly ``use_auth_token`` or ``storage_options``, to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
+ """
+ archived_files = []
+ archive_files_counter = 0
+ for filepath in data_files_list:
+ if str(filepath).endswith(".zip"):
+ archive_files_counter += 1
+ if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE:
+ break
+ extracted = xjoin(StreamingDownloadManager().extract(filepath), "**")
+ archived_files += [
+ f.split("::")[0]
+ for f in xglob(extracted, recursive=True, download_config=download_config)[
+ : config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE
+ ]
+ ]
+ extensions_counter = Counter(
+ "." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+ most_common = extensions_counter.most_common(1)[0][0]
+ if most_common in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[most_common]
+ return None, {}
+
+
+def infer_module_for_data_files(
+ data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], Dict[str, Any]]:
+ """Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
+
+ Args:
+ data_files ([`DataFilesDict`]): Dict of list of data files.
+ path (str, *optional*): Dataset name or path.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - builder kwargs
+ """
+ split_modules = {
+ split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
+ for split, data_files_list in data_files.items()
+ }
+ module_name, default_builder_kwargs = next(iter(split_modules.values()))
+ if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
+ raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
+ if not module_name:
+ raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else ""))
+ return module_name, default_builder_kwargs
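+
+# Illustrative sketch (not part of the upstream module): every split must resolve to the same
+# module, otherwise a ValueError is raised. Plain dict/list stand-ins with hypothetical paths
+# are used here, since the function only iterates them.
+#
+#   >>> infer_module_for_data_files({"train": ["train.jsonl"], "test": ["test.jsonl"]})
+#   ('json', {})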
+
+
+def create_builder_configs_from_metadata_configs(
+ module_path: str,
+ metadata_configs: MetadataConfigs,
+ supports_metadata: bool,
+ base_path: Optional[str] = None,
+ default_builder_kwargs: Dict[str, Any] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[List[BuilderConfig], str]:
+ builder_cls = import_main_class(module_path)
+ builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
+ default_config_name = metadata_configs.get_default_config_name()
+ builder_configs = []
+ default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs
+
+ base_path = base_path if base_path is not None else ""
+ for config_name, config_params in metadata_configs.items():
+ config_data_files = config_params.get("data_files")
+ config_data_dir = config_params.get("data_dir")
+ config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path
+ try:
+ config_patterns = (
+ sanitize_patterns(config_data_files)
+ if config_data_files is not None
+ else get_data_patterns(config_base_path, download_config=download_config)
+ )
+ config_data_files_dict = DataFilesPatternsDict.from_patterns(
+ config_patterns,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ except EmptyDatasetError as e:
+ raise EmptyDatasetError(
+ f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
+                f" check `data_files` and `data_dir` parameters in the `configs` YAML field in README.md. "
+ ) from e
+ if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ config_metadata_patterns = get_metadata_patterns(base_path, download_config=download_config)
+ except FileNotFoundError:
+ config_metadata_patterns = None
+ if config_metadata_patterns is not None:
+ config_metadata_data_files_list = DataFilesPatternsList.from_patterns(config_metadata_patterns)
+ config_data_files_dict = DataFilesPatternsDict(
+ {
+ split: data_files_list + config_metadata_data_files_list
+ for split, data_files_list in config_data_files_dict.items()
+ }
+ )
+ ignored_params = [
+ param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
+ ]
+ if ignored_params:
+ logger.warning(
+ f"Some datasets params were ignored: {ignored_params}. "
+ "Make sure to use only valid params for the dataset builder and to have "
+                "an up-to-date version of the `datasets` library."
+ )
+ builder_configs.append(
+ builder_config_cls(
+ name=config_name,
+ data_files=config_data_files_dict,
+ data_dir=config_data_dir,
+ **{
+ param: value
+ for param, value in {**default_builder_kwargs, **config_params}.items()
+ if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
+ },
+ )
+ )
+ return builder_configs, default_config_name
+
+
+@dataclass
+class BuilderConfigsParameters:
+ """Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
+
+ Attributes:
+ metadata_configs (`MetadataConfigs`, *optional*):
+ Configs parsed from yaml's metadata.
+ builder_configs (`list[BuilderConfig]`, *optional*):
+ List of BuilderConfig objects created from metadata_configs above.
+ default_config_name (`str`):
+ Name of default config taken from yaml's metadata.
+ """
+
+ metadata_configs: Optional[MetadataConfigs] = None
+ builder_configs: Optional[List[BuilderConfig]] = None
+ default_config_name: Optional[str] = None
+
+
+@dataclass
+class DatasetModule:
+ module_path: str
+ hash: str
+ builder_kwargs: dict
+ builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
+ dataset_infos: Optional[DatasetInfosDict] = None
+ importable_file_path: Optional[str] = None
+
+
+@dataclass
+class MetricModule:
+ module_path: str
+ hash: str
+
+
+class _DatasetModuleFactory:
+ def get_module(self) -> DatasetModule:
+ raise NotImplementedError
+
+
+class _MetricModuleFactory:
+ def get_module(self) -> MetricModule:
+ raise NotImplementedError
+
+
+class GithubMetricModuleFactory(_MetricModuleFactory):
+    """Get the module of a metric. The metric script is downloaded from GitHub.
+
+    Deprecated: use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+    """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+        trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config.copy() if download_config else DownloadConfig()
+ if self.download_config.max_retries < 3:
+ self.download_config.max_retries = 3
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ assert self.name.count("/") == 0
+ increase_load_count(name, resource_type="metric")
+
+ def download_loading_script(self, revision: Optional[str]) -> str:
+ file_path = hf_github_url(path=self.name, name=self.name + ".py", revision=revision, dataset=False)
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ _loading_script_url = hf_github_url(
+ path=self.name, name=self.name + ".py", revision=self.revision, dataset=False
+ )
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {_loading_script_url}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ revision = self.revision
+ try:
+ local_path = self.download_loading_script(revision)
+ revision = self.revision
+ except FileNotFoundError:
+ if revision is not None:
+ raise
+ else:
+ revision = "main"
+ local_path = self.download_loading_script(revision)
+ logger.warning(
+ f"Couldn't find a directory or a metric named '{self.name}' in this version. "
+ f"It was picked from the main branch on github instead."
+ )
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_github_url(path=self.name, name="", revision=revision, dataset=False),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+        # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalMetricModuleFactory(_MetricModuleFactory):
+    """Get the module of a local metric. The metric script is loaded from a local script.
+
+    Deprecated: use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+    """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+        trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+        # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """Get the module of a local dataset. The dataset script is loaded from a local script."""
+
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
+ dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path.is_file():
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
+ if dataset_readme_path.is_file():
+ additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+
+        # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {"base_path": str(Path(self.path).parent)}
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+
+
+class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+    """Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
+    from the data file extensions."""
+
+ def __init__(
+ self,
+ path: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ if data_dir and os.path.isabs(data_dir):
+ raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
+
+ self.path = Path(path).as_posix()
+ self.name = Path(path).stem
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_mode = download_mode
+
+ def get_module(self) -> DatasetModule:
+ readme_path = os.path.join(self.path, config.REPOCARD_FILENAME)
+ standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME)
+ dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
+ if os.path.exists(standalone_yaml_path):
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ # we need a set of data files to find which dataset builder to use
+        # because we need to infer the module name from the file extensions
+ base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.path,
+ )
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": self.path,
+ "dataset_name": camelcase_to_snakecase(Path(self.path).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
+ with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs})
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
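+
+# Illustrative sketch (not part of the upstream module): point the factory at a local directory
+# of data files (hypothetical path) and let it pick the matching packaged builder module from
+# the file extensions it finds there.
+#
+#   >>> factory = LocalDatasetModuleFactoryWithoutScript("./path/to/my_csv_dataset")
+#   >>> dataset_module = factory.get_module()
+#   >>> dataset_module.builder_kwargs["dataset_name"]
+#   'my_csv_dataset'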
+
+
+class PackagedDatasetModuleFactory(_DatasetModuleFactory):
+ """Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
+
+ def __init__(
+ self,
+ name: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
+ patterns = (
+ sanitize_patterns(self.data_files)
+ if self.data_files is not None
+ else get_data_patterns(base_path, download_config=self.download_config)
+ )
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ download_config=self.download_config,
+ base_path=base_path,
+ )
+ supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
+
+ builder_kwargs = {
+ "data_files": data_files,
+ "dataset_name": self.name,
+ }
+
+ return DatasetModule(module_path, hash, builder_kwargs)
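+
+# Illustrative sketch (not part of the upstream module): load the packaged "json" builder module
+# for an explicit mapping of (hypothetical) data files; the patterns are resolved against the
+# current working directory when actually run, so the files must exist there.
+#
+#   >>> factory = PackagedDatasetModuleFactory("json", data_files={"train": ["./data/train.jsonl"]})
+#   >>> dataset_module = factory.get_module()
+#   >>> sorted(dataset_module.builder_kwargs)
+#   ['data_files', 'dataset_name']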
+
+
+class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from data files of a dataset repository.
+    The dataset builder module to use is inferred from the data file extensions.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision=self.revision,
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ # even if metadata_configs is not None (which means that we will resolve files for each config later)
+        # we cannot skip resolving all files because we need to infer the module name from the file extensions
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip("/")
+
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ dataset_readme_path = cached_path(
+ hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data
+ except FileNotFoundError:
+ dataset_card_data = DatasetCardData()
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading standalone yaml"
+ try:
+ standalone_yaml_path = cached_path(
+ hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ except FileNotFoundError:
+ pass
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if config.USE_PARQUET_EXPORT: # maybe don't use the infos from the parquet export
+ try:
+ exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ exported_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
+ for config_name in exported_dataset_infos
+ }
+ )
+ except _dataset_viewer.DatasetViewerError:
+ exported_dataset_infos = None
+ else:
+ exported_dataset_infos = None
+ if exported_dataset_infos:
+ exported_dataset_infos.update(dataset_infos)
+ dataset_infos = exported_dataset_infos
+ # we need a set of data files to find which dataset builder to use
+        # because we need to infer the module name from the file extensions
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path, download_config=self.download_config)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ download_config=self.download_config,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.name,
+ download_config=self.download_config,
+ )
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ download_config=self.download_config,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": hf_dataset_url(self.name, "", revision=revision).rstrip("/"),
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ dataset_infos_path = cached_path(
+ hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ except FileNotFoundError:
+ pass
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = revision
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
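+
+# Illustrative sketch (not part of the upstream module): resolve a script-less dataset repository
+# on the Hub. "user/my_dataset" is a hypothetical repo id; a network connection (and a token for
+# private repos) is required when actually run.
+#
+#   >>> factory = HubDatasetModuleFactoryWithoutScript("user/my_dataset", revision="main")
+#   >>> dataset_module = factory.get_module()
+#   >>> dataset_module.builder_kwargs["repo_id"]
+#   'user/my_dataset'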
+
+
+class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from parquet files of a dataset repository parquet export.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
+ for config_name in exported_dataset_infos
+ }
+ )
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision="refs/convert/parquet",
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
+ revision=revision, exported_parquet_files=exported_parquet_files, dataset_infos=dataset_infos
+ )
+ module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ supports_metadata=False,
+ download_config=self.download_config,
+ )
+ hash = self.revision
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset from a dataset repository.
+ The dataset script comes from the script inside the dataset repository.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ increase_load_count(name, resource_type="dataset")
+
+ def download_loading_script(self) -> str:
+ file_path = hf_dataset_url(self.name, self.name.split("/")[-1] + ".py", revision=self.revision)
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def download_dataset_infos_file(self) -> str:
+ dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision)
+ # Download the dataset infos file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ return cached_path(
+ dataset_infos,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def download_dataset_readme_file(self) -> str:
+ readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.revision)
+        # Download the dataset README file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ return cached_path(
+ readme_url,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ local_path = self.download_loading_script()
+ dataset_infos_path = self.download_dataset_infos_file()
+ dataset_readme_path = self.download_dataset_readme_file()
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_dataset_url(self.name, "", revision=self.revision),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path:
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
+ if dataset_readme_path:
+ additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+        # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "base_path": hf_dataset_url(self.name, "", revision=self.revision).rstrip("/"),
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+
+
+class CachedDatasetModuleFactory(_DatasetModuleFactory):
+ """
+ Get the module of a dataset that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ cache_dir: Optional[str] = None,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.cache_dir = cache_dir
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") <= 1
+
+ def get_module(self) -> DatasetModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if hashes:
+ # get most recent
+ def _get_modification_time(module_hash):
+ return (
+ (Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
+ .stat()
+ .st_mtime
+ )
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ warning_msg = (
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}"
+ )
+ if not config.HF_DATASETS_OFFLINE:
+ warning_msg += ", or remotely on the Hugging Face Hub."
+ logger.warning(warning_msg)
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+            # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+ cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
+ namespace_and_dataset_name = self.name.split("/")
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
+ cached_relative_path = "___".join(namespace_and_dataset_name)
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
+ if os.path.isdir(cached_directory_path)
+ ]
+ if cached_directory_paths:
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": self.name.split("/")[-1],
+ }
+ warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
+ if config.HF_DATASETS_OFFLINE:
+ warning_msg += " (offline mode is enabled)."
+ logger.warning(warning_msg)
+ return DatasetModule(
+ "datasets.packaged_modules.cache.cache",
+ "auto",
+ {**builder_kwargs, "version": "auto"},
+ )
+ raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}")
+
+
+class CachedMetricModuleFactory(_MetricModuleFactory):
+ """
+ Get the module of a metric that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+
+
+ <Deprecated version="2.5.0">
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ </Deprecated>
+
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") == 0
+
+ def get_module(self) -> MetricModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "metrics", self.name)
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if not hashes:
+ raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
+ # get most recent
+
+ def _get_modification_time(module_hash):
+ return (Path(importable_directory_path) / module_hash / (self.name + ".py")).stat().st_mtime
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ logger.warning(
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
+ )
+ # make sure the new module is noticed by the import system
+ module_path = ".".join([os.path.basename(dynamic_modules_path), "metrics", self.name, hash, self.name])
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+def dataset_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
+ cache_dir: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ _require_custom_configs=False,
+ **download_kwargs,
+) -> DatasetModule:
+ """
+ Download/extract/cache a dataset module.
+
+ Dataset codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the dataset.
+ Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if ``path`` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
+ - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
+ -> load the dataset builder from the dataset script
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
+
+ For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
+
+ - if ``path`` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
+ in which case it's equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
+ data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ <Tip warning={true}>
+
+ `trust_remote_code` will default to False in the next major release.
+
+ </Tip>
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ DatasetModule
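+
+ Example (illustrative; `'username/dataset_name'` is a placeholder repository):
+
+ ```py
+ >>> from datasets.load import dataset_module_factory
+ >>> # A packaged builder such as 'csv' resolves locally, without contacting the Hub:
+ >>> module = dataset_module_factory('csv', data_files='path/to/local/my_dataset.csv')
+ >>> # A Hub repository resolves remotely, falling back to the local cache if the Hub is unreachable:
+ >>> module = dataset_module_factory('username/dataset_name')
+ ```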
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+ download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
+
+ # We have several ways to get a dataset builder:
+ #
+ # - if path is the name of a packaged dataset module
+ # -> use the packaged module (json, csv, etc.)
+ #
+ # - if os.path.join(path, name) is a local python file
+ # -> use the module from the python file
+ # - if path is a local directory (but no python file)
+ # -> use a packaged module (csv, text etc.) based on content of the directory
+ #
+ # - if path has one "/" and is dataset repository on the HF hub with a python file
+ # -> the module from the python file in the dataset repository
+ # - if path has one "/" and is dataset repository on the HF hub without a python file
+ # -> use a packaged module (csv, text etc.) based on content of the repository
+
+ # Try packaged
+ if path in _PACKAGED_DATASETS_MODULES:
+ return PackagedDatasetModuleFactory(
+ path,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ # Try locally
+ elif path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalDatasetModuleFactoryWithScript(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalDatasetModuleFactoryWithScript(
+ combined_path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ elif os.path.isdir(path):
+ return LocalDatasetModuleFactoryWithoutScript(
+ path, data_dir=data_dir, data_files=data_files, download_mode=download_mode
+ ).get_module()
+ # Try remotely
+ elif is_relative_path(path) and path.count("/") <= 1:
+ try:
+ _raise_if_offline_mode_is_enabled()
+ hf_api = HfApi(config.HF_ENDPOINT)
+ try:
+ dataset_info = hf_api.dataset_info(
+ repo_id=path,
+ revision=revision,
+ token=download_config.token,
+ timeout=100.0,
+ )
+ except Exception as e: # noqa catch any exception of hf_hub and consider that the dataset doesn't exist
+ if isinstance(
+ e,
+ (
+ OfflineModeIsEnabled,
+ requests.exceptions.ConnectTimeout,
+ requests.exceptions.ConnectionError,
+ ),
+ ):
+ raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({type(e).__name__})")
+ elif "404" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ raise DatasetNotFoundError(msg + f" at revision '{revision}'" if revision else msg)
+ elif "401" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ msg = msg + f" at revision '{revision}'" if revision else msg
+ raise DatasetNotFoundError(
+ msg
+ + f". If the dataset is private or gated, make sure to log in with `huggingface-cli login` or visit the dataset page at https://huggingface.co/datasets/{path} to ask for access."
+ )
+ else:
+ raise e
+ if filename in [sibling.rfilename for sibling in dataset_info.siblings]: # contains a dataset script
+ fs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ if _require_custom_configs or (revision and revision != "main"):
+ can_load_config_from_parquet_export = False
+ elif _require_default_config_name:
+ with fs.open(f"datasets/{path}/{filename}", "r", encoding="utf-8") as f:
+ can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
+ else:
+ can_load_config_from_parquet_export = True
+ if config.USE_PARQUET_EXPORT and can_load_config_from_parquet_export:
+ # If the parquet export is ready (parquet files + info available for the current sha), we can use it instead
+ # This fails when the dataset has multiple configs and a default config and
+ # the user didn't specify a configuration name (_require_default_config_name=True).
+ try:
+ return HubDatasetModuleFactoryWithParquetExport(
+ path, download_config=download_config, revision=dataset_info.sha
+ ).get_module()
+ except _dataset_viewer.DatasetViewerError:
+ pass
+ # Otherwise we must use the dataset script if the user trusts it
+ return HubDatasetModuleFactoryWithScript(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ return HubDatasetModuleFactoryWithoutScript(
+ path,
+ revision=revision,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ except Exception as e1:
+ # All the attempts failed, before raising the error we should check if the module is already cached
+ try:
+ return CachedDatasetModuleFactory(
+ path, dynamic_modules_path=dynamic_modules_path, cache_dir=cache_dir
+ ).get_module()
+ except Exception:
+ # If it's not in the cache, then it doesn't exist.
+ if isinstance(e1, OfflineModeIsEnabled):
+ raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
+ if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)):
+ raise e1 from None
+ if isinstance(e1, FileNotFoundError):
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
+ f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
+ ) from None
+ raise e1 from None
+ else:
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory."
+ )
+
+
+@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+def metric_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ **download_kwargs,
+) -> MetricModule:
+ """
+ Download/extract/cache a metric module.
+
+ <Deprecated version="2.5.0">
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ </Deprecated>
+
+ Metrics codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the metric script.
+
+ - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
+ -> load the module from the metric script
+ e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
+ - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
+ -> load the module from the metric script in the GitHub repository at huggingface/datasets
+ e.g. ``'accuracy'`` or ``'rouge'``.
+
+ revision (Optional ``Union[str, datasets.Version]``):
+ If specified, the module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ <Tip warning={true}>
+
+ `trust_remote_code` will default to False in the next major release.
+
+ </Tip>
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ MetricModule
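+
+ Example (illustrative sketch; resolves the cached or remote `accuracy` metric script):
+
+ ```py
+ >>> from datasets.load import metric_module_factory
+ >>> metric_module = metric_module_factory('accuracy')
+ >>> metric_module.module_path # importable path of the metric script module
+ ```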
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
+ # Try locally
+ if path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalMetricModuleFactory(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalMetricModuleFactory(
+ combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
+ ).get_module()
+ elif is_relative_path(path) and path.count("/") == 0:
+ try:
+ return GithubMetricModuleFactory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ except Exception as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
+ try:
+ return CachedMetricModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
+ except Exception: # noqa if it's not in the cache, then it doesn't exist.
+ if not isinstance(e1, FileNotFoundError):
+ raise e1 from None
+ raise FileNotFoundError(
+ f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}. "
+ f"Metric '{path}' doesn't exist on the Hugging Face Hub either."
+ ) from None
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}.")
+
+
+@deprecated("Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate")
+def load_metric(
+ path: str,
+ config_name: Optional[str] = None,
+ process_id: int = 0,
+ num_process: int = 1,
+ cache_dir: Optional[str] = None,
+ experiment_id: Optional[str] = None,
+ keep_in_memory: bool = False,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ trust_remote_code: Optional[bool] = None,
+ **metric_init_kwargs,
+) -> Metric:
+ """Load a `datasets.Metric`.
+
+ <Deprecated version="2.5.0">
+
+ Use `evaluate.load` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+ </Deprecated>
+
+ Args:
+
+ path (``str``):
+ path to the metric processing script with the metric builder. Can be either:
+ - a local path to a processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
+ - a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
+ e.g. ``'rouge'`` or ``'bleu'``
+ config_name (:obj:`str`, optional): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
+ process_id (:obj:`int`, optional): for distributed evaluation: id of the process
+ num_process (:obj:`int`, optional): for distributed evaluation: total number of processes
+ cache_dir (Optional str): path to store the temporary predictions and references (defaults to `~/.cache/huggingface/metrics/`)
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
+ download_config (Optional ``datasets.DownloadConfig``): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
+ at this version. By default, it is set to the local version of the lib. Specifying a version that is different from
+ your local version of the lib might cause compatibility issues.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ <Tip warning={true}>
+
+ `trust_remote_code` will default to False in the next major release.
+
+ </Tip>
+
+ Returns:
+ `datasets.Metric`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> accuracy = load_metric('accuracy')
+ >>> accuracy.compute(references=[1, 0], predictions=[1, 1])
+ {'accuracy': 0.5}
+ ```
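+
+ Some metrics expose configurations; for instance the GLUE metric has one configuration per subset (illustrative):
+
+ ```py
+ >>> glue_metric = load_metric('glue', 'mrpc')
+ >>> glue_metric.compute(predictions=[0, 1], references=[0, 1])
+ ```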
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ metric_module = metric_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ trust_remote_code=trust_remote_code,
+ ).module_path
+ metric_cls = import_main_class(metric_module, dataset=False)
+ metric = metric_cls(
+ config_name=config_name,
+ process_id=process_id,
+ num_process=num_process,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ experiment_id=experiment_id,
+ **metric_init_kwargs,
+ )
+
+ # Download and prepare resources for the metric
+ metric.download_and_prepare(download_config=download_config)
+
+ return metric
+
+
+def load_dataset_builder(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ **config_kwargs,
+) -> DatasetBuilder:
+ """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
+ without downloading the dataset itself.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features ([`Features`], *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ <Tip warning={true}>
+
+ `trust_remote_code` will default to False in the next major release.
+
+ </Tip>
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`]
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`DatasetBuilder`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.info.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
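+
+ The same builder can then materialize the dataset (an illustrative follow-up; not required if you only want to inspect the info):
+
+ ```py
+ >>> ds_builder.download_and_prepare()
+ >>> ds = ds_builder.as_dataset(split='train')
+ ```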
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ if token is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.token = token
+ if storage_options is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.storage_options.update(storage_options)
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=_require_default_config_name,
+ _require_custom_configs=bool(config_kwargs),
+ )
+ # Get dataset builder class from the processing script
+ builder_kwargs = dataset_module.builder_kwargs
+ data_dir = builder_kwargs.pop("data_dir", data_dir)
+ data_files = builder_kwargs.pop("data_files", data_files)
+ config_name = builder_kwargs.pop(
+ "config_name", name or dataset_module.builder_configs_parameters.default_config_name
+ )
+ dataset_name = builder_kwargs.pop("dataset_name", None)
+ info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
+
+ if (
+ path in _PACKAGED_DATASETS_MODULES
+ and data_files is None
+ and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None
+ ):
+ error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
+ example_extensions = [
+ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
+ ]
+ if example_extensions:
+ error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
+ raise ValueError(error_msg)
+
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
+ # Instantiate the dataset builder
+ builder_instance: DatasetBuilder = builder_cls(
+ cache_dir=cache_dir,
+ dataset_name=dataset_name,
+ config_name=config_name,
+ data_dir=data_dir,
+ data_files=data_files,
+ hash=dataset_module.hash,
+ info=info,
+ features=features,
+ token=token,
+ storage_options=storage_options,
+ **builder_kwargs,
+ **config_kwargs,
+ )
+ builder_instance._use_legacy_cache_dir_if_possible(dataset_module)
+
+ return builder_instance
+
+
+def load_dataset(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ split: Optional[Union[str, Split]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ save_infos: bool = False,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ task="deprecated",
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: Optional[bool] = None,
+ **config_kwargs,
+) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
+ """Load a dataset from the Hugging Face Hub, or a local dataset.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ This function does the following under the hood:
+
+ 1. Download and import the dataset script from `path` if it's not already cached inside the library.
+
+ If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
+
+ Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
+ contain the path or URL to the original data files and the code to load examples from the original data files.
+
+ You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
+
+ 2. Run the dataset script which will:
+
+ * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
+ * Process and cache the dataset in typed Arrow tables.
+
+ Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
+ They can be directly accessed from disk, loaded in RAM or even streamed over the web.
+
+ 3. Return a dataset built from the requested splits in `split` (default: all).
+
+ It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
+ In this case, it automatically loads all the data files from the directory or the dataset repository.
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ split (`Split` or `str`):
+ Which split of the data to load.
+ If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
+ If given, will return a single Dataset.
+ Splits can be combined and specified like in tensorflow-datasets.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features (`Features`, *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+ <Deprecated version="2.9.1">
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+ </Deprecated>
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+ save_infos (`bool`, defaults to `False`):
+ Save the dataset information (checksums/size/splits/...).
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ task (`str`):
+ The task to prepare the dataset for during training and evaluation. Casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+
+ <Deprecated version="2.13.0">
+
+ `task` was deprecated in version 2.13.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ streaming (`bool`, defaults to `False`):
+ If set to `True`, don't download the data files. Instead, it streams the data progressively while
+ iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
+
+ Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example.
+ JSON files may be downloaded completely. Streaming from remote zip or gzip files is also supported, but other compressed formats
+ like rar and xz are not yet supported. The tgz format doesn't allow streaming.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ <Tip warning={true}>
+
+ `trust_remote_code` will default to False in the next major release.
+
+ </Tip>
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the `BuilderConfig`
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - if `split` is not `None`: the dataset requested,
+ - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
+
+ or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
+
+ - if `split` is not `None`: the dataset requested,
+ - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
+
+ Example:
+
+ Load a dataset from the Hugging Face Hub:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train')
+
+ # Map data files to splits
+ >>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
+ >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
+ ```
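+
+ Combine and slice splits (an illustrative sketch using the split slicing syntax):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train[:100]+validation[:100]')
+ ```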
+
+ Load a local dataset:
+
+ ```py
+ # Load a CSV file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
+
+ # Load a JSON file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
+
+ # Load from a local loading script
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
+ ```
+
+ Load an [`~datasets.IterableDataset`]:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
+ ```
+
+ Load an image dataset with the `ImageFolder` dataset builder:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if task != "deprecated":
+ warnings.warn(
+ "'task' was deprecated in version 2.13.0 and will be removed in 3.0.0.\n",
+ FutureWarning,
+ )
+ else:
+ task = None
+ if data_files is not None and not data_files:
+ raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
+ if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
+ raise ValueError(
+ "You are trying to load a dataset that was saved using `save_to_disk`. "
+ "Please use `load_from_disk` instead."
+ )
+
+ if streaming and num_proc is not None:
+ raise NotImplementedError(
+ "Loading a streaming dataset in parallel with `num_proc` is not implemented. "
+ "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
+ )
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(
+ (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
+ )
+
+ # Create a dataset builder
+ builder_instance = load_dataset_builder(
+ path=path,
+ name=name,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ features=features,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ storage_options=storage_options,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=name is None,
+ **config_kwargs,
+ )
+
+ # Return iterable dataset in case of streaming
+ if streaming:
+ return builder_instance.as_streaming_dataset(split=split)
+
+ # Download and prepare data
+ builder_instance.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ # Build dataset for splits
+ keep_in_memory = (
+ keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
+ )
+ ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
+ # Rename and cast features to match task schema
+ if task is not None:
+ # To avoid issuing the same warning twice
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", FutureWarning)
+ ds = ds.prepare_for_task(task)
+ if save_infos:
+ builder_instance._save_infos()
+
+ return ds
+
+
+def load_from_disk(
+ dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None
+) -> Union[Dataset, DatasetDict]:
+ """
+ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or
+ from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be
+ loaded from.
+ fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ <Deprecated version="2.9.0">
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ </Deprecated>
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory: the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
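+
+ Load from a remote filesystem (illustrative; the S3 bucket and credentials are hypothetical):
+
+ ```py
+ >>> ds = load_from_disk('s3://my-bucket/dataset/train', storage_options={'key': 'aws_access_key_id', 'secret': 'aws_secret_access_key'})
+ ```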
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, *_ = url_to_fs(dataset_path, **(storage_options or {}))
+ if not fs.exists(dataset_path):
+ raise FileNotFoundError(f"Directory {dataset_path} not found")
+ if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ ):
+ return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)):
+ return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ else:
+ raise FileNotFoundError(
+ f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory."
+ )
diff --git a/venv/lib/python3.10/site-packages/datasets/metric.py b/venv/lib/python3.10/site-packages/datasets/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..187c5e5c925b71b26ca83021523dd55c28989d28
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/metric.py
@@ -0,0 +1,652 @@
+# Copyright 2020 The HuggingFace Datasets Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Metrics base class."""
+
+import os
+import types
+import uuid
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+from filelock import BaseFileLock, Timeout
+
+from . import config
+from .arrow_dataset import Dataset
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager
+from .features import Features
+from .info import DatasetInfo, MetricInfo
+from .naming import camelcase_to_snakecase
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+from .utils.py_utils import copyfunc, temp_seed
+
+
+logger = get_logger(__name__)
+
+
+class FileFreeLock(BaseFileLock):
+ """Thread lock until a file **cannot** be locked"""
+
+ def __init__(self, lock_file, *args, **kwargs):
+ self.filelock = FileLock(lock_file)
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
+
+ def _acquire(self):
+ try:
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
+ except Timeout:
+ # We couldn't acquire the lock, the file is locked!
+ self._context.lock_file_fd = self.filelock.lock_file
+ else:
+ # We were able to acquire the lock, the file is not yet locked!
+ self.filelock.release()
+ self._context.lock_file_fd = None
+
+ def _release(self):
+ self._context.lock_file_fd = None
+
+
+# lists - summarize long lists similarly to NumPy
+# arrays/tensors - let the frameworks control formatting
+def summarize_if_long_list(obj):
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
+ return f"{obj}"
+
+ def format_chunk(chunk):
+ return ", ".join(repr(x) for x in chunk)
+
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
+
+
+class MetricInfoMixin:
+ """This base class exposes some attributes of MetricInfo
+ at the base level of the Metric for easy access.
+
+ <Deprecated version="2.5.0">
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ </Deprecated>
+
+ """
+
+ def __init__(self, info: MetricInfo):
+ self._metric_info = info
+
+ @property
+ def info(self):
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
+ return self._metric_info
+
+ @property
+ def name(self) -> str:
+ return self._metric_info.metric_name
+
+ @property
+ def experiment_id(self) -> Optional[str]:
+ return self._metric_info.experiment_id
+
+ @property
+ def description(self) -> str:
+ return self._metric_info.description
+
+ @property
+ def citation(self) -> str:
+ return self._metric_info.citation
+
+ @property
+ def features(self) -> Features:
+ return self._metric_info.features
+
+ @property
+ def inputs_description(self) -> str:
+ return self._metric_info.inputs_description
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._metric_info.homepage
+
+ @property
+ def license(self) -> str:
+ return self._metric_info.license
+
+ @property
+ def codebase_urls(self) -> Optional[List[str]]:
+ return self._metric_info.codebase_urls
+
+ @property
+ def reference_urls(self) -> Optional[List[str]]:
+ return self._metric_info.reference_urls
+
+ @property
+ def streamable(self) -> bool:
+ return self._metric_info.streamable
+
+ @property
+ def format(self) -> Optional[str]:
+ return self._metric_info.format
+
+
+class Metric(MetricInfoMixin):
+ """A Metric is the base class and common API for all metrics.
+
+ <Deprecated version="2.5.0">
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ </Deprecated>
+
+ Args:
+ config_name (``str``): This is used to define a hash specific to a metrics computation script and prevents the metric's data
+ to be overridden when the metric loading script is modified.
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
+ The data directory should be located on a shared file-system in distributed setups.
+ num_process (``int``): specify the total number of nodes in a distributed setting.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
+ timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
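+
+ Example (a hypothetical two-process run sharing a cache directory; `rank` stands for the current process index and is not defined here):
+
+ ```py
+ >>> metric = datasets.load_metric('accuracy', num_process=2, process_id=rank, experiment_id='my_eval')
+ >>> metric.add_batch(predictions=[1, 0], references=[1, 1])
+ >>> score = metric.compute() # aggregated result on process 0, `None` on the other processes
+ ```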
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ config_name: Optional[str] = None,
+ keep_in_memory: bool = False,
+ cache_dir: Optional[str] = None,
+ num_process: int = 1,
+ process_id: int = 0,
+ seed: Optional[int] = None,
+ experiment_id: Optional[str] = None,
+ max_concurrent_cache_files: int = 10000,
+ timeout: Union[int, float] = 100,
+ **kwargs,
+ ):
+ # prepare info
+ self.config_name = config_name or "default"
+ info = self._info()
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
+ info.config_name = self.config_name
+ info.experiment_id = experiment_id or "default_experiment"
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
+
+ # Safety checks on num_process and process_id
+ if not isinstance(process_id, int) or process_id < 0:
+ raise ValueError("'process_id' should be a number greater than 0")
+ if not isinstance(num_process, int) or num_process <= process_id:
+ raise ValueError("'num_process' should be a number greater than process_id")
+ if keep_in_memory and num_process != 1:
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
+
+ self.num_process = num_process
+ self.process_id = process_id
+ self.max_concurrent_cache_files = max_concurrent_cache_files
+
+ self.keep_in_memory = keep_in_memory
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
+ self.data_dir = self._build_data_dir()
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
+ else:
+ self.seed: int = seed
+ self.timeout: Union[int, float] = timeout
+
+ # Update 'compute' and 'add' docstring
+ # methods need to be copied otherwise it changes the docstrings of every instance
+ self.compute = types.MethodType(copyfunc(self.compute), self)
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
+ self.add = types.MethodType(copyfunc(self.add), self)
+ self.compute.__func__.__doc__ += self.info.inputs_description
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
+ self.add.__func__.__doc__ += self.info.inputs_description
+
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
+ self.buf_writer = None
+ self.writer = None
+ self.writer_batch_size = None
+ self.data = None
+
+ # This is the cache file we store our predictions/references in
+ # Keep it None for now so we can (cloud)pickle the object
+ self.cache_file_name = None
+ self.filelock = None
+ self.rendez_vous_lock = None
+
+ # This is all the cache files on which we have a lock when we are in a distributed setting
+ self.file_paths = None
+ self.filelocks = None
+
+ def __len__(self):
+ """Return the number of examples (predictions or predictions/references pair)
+ currently stored in the metric's cache.
+ """
+ return 0 if self.writer is None else len(self.writer)
+
+ def __repr__(self):
+ return (
+ f'Metric(name: "{self.name}", features: {self.features}, '
+ f'usage: """{self.inputs_description}""", '
+ f"stored examples: {len(self)})"
+ )
+
+ def _build_data_dir(self):
+ """Path of this metric in cache_dir:
+ Will be:
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
+ If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ builder_data_dir = self._data_dir_root
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
+ os.makedirs(builder_data_dir, exist_ok=True)
+ return builder_data_dir
+
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
+ """Create a new cache file. If the default cache file is used, we generated a new hash."""
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
+ filelock = None
+ for i in range(self.max_concurrent_cache_files):
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=timeout)
+ except Timeout:
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup),
+ # We raise an error
+ if self.num_process != 1:
+ raise ValueError(
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+ if i == self.max_concurrent_cache_files - 1:
+ raise ValueError(
+ f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
+ f"(current value is {self.max_concurrent_cache_files})."
+ ) from None
+ # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
+ file_uuid = str(uuid.uuid4())
+ file_path = os.path.join(
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
+ )
+ else:
+ break
+
+ return file_path, filelock
+
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
+ """Get a lock on all the cache files in a distributed setup.
+        We wait for timeout seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
+ """
+ if self.num_process == 1:
+ if self.cache_file_name is None:
+ raise ValueError(
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
+ "at least once before calling `compute`."
+ )
+ file_paths = [self.cache_file_name]
+ else:
+ file_paths = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
+ for process_id in range(self.num_process)
+ ]
+
+ # Let's acquire a lock on each process files to be sure they are finished writing
+ filelocks = []
+ for process_id, file_path in enumerate(file_paths):
+ if process_id == 0: # process 0 already has its lock file
+ filelocks.append(self.filelock)
+ else:
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
+ ) from None
+ else:
+ filelocks.append(filelock)
+
+ return file_paths, filelocks
+
+ def _check_all_processes_locks(self):
+ expected_lock_file_names = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
+ for process_id in range(self.num_process)
+ ]
+ for expected_lock_file_name in expected_lock_file_names:
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+
+ def _check_rendez_vous(self):
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ rendez_vous_lock = FileLock(lock_file_name)
+ try:
+ rendez_vous_lock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
+ else:
+ rendez_vous_lock.release()
+
+ def _finalize(self):
+        """Close all the writing processes and load/gather the data
+ from all the nodes if main node or all_process is True.
+ """
+ if self.writer is not None:
+ self.writer.finalize()
+ self.writer = None
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
+ if self.filelock is not None and self.process_id > 0:
+ self.filelock.release()
+
+ if self.keep_in_memory:
+ # Read the predictions and references
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
+
+ elif self.process_id == 0:
+ # Let's acquire a lock on each node files to be sure they are finished writing
+ file_paths, filelocks = self._get_all_cache_files()
+
+ # Read the predictions and references
+ try:
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
+ except FileNotFoundError:
+ raise ValueError(
+ "Error in finalize: another metric instance is already using the local cache file. "
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
+ ) from None
+
+ # Store file paths and locks and we will release/delete them after the computation.
+ self.file_paths = file_paths
+ self.filelocks = filelocks
+
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
+ """Compute the metrics.
+
+ Usage of positional arguments is not allowed to prevent mistakes.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+ **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
+ method (see details in the docstring).
+
+ Return:
+ dict or None
+
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
+ - None if the metric is not run on the main process (``process_id != 0``).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
+ ```
+ """
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
+ if predictions is None and references is None:
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
+ all_kwargs.update(missing_kwargs)
+ else:
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
+ if missing_inputs:
+ raise ValueError(
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
+ )
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
+
+ if any(v is not None for v in inputs.values()):
+ self.add_batch(**inputs)
+ self._finalize()
+
+ self.cache_file_name = None
+ self.filelock = None
+
+ if self.process_id == 0:
+ self.data.set_format(type=self.info.format)
+
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
+ with temp_seed(self.seed):
+ output = self._compute(**inputs, **compute_kwargs)
+
+ if self.buf_writer is not None:
+ self.buf_writer = None
+ del self.data
+ self.data = None
+ else:
+ # Release locks and delete all the cache files. Process 0 is released last.
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
+ logger.info(f"Removing {file_path}")
+ del self.data
+ self.data = None
+ del self.writer
+ self.writer = None
+ os.remove(file_path)
+ filelock.release()
+
+ return output
+ else:
+ return None
+
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
+ """Add a batch of predictions and references for the metric's stack.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ batch = {"predictions": predictions, "references": references, **kwargs}
+        batch = {input_name: batch[input_name] for input_name in self.features}
+ batch = self.info.features.encode_batch(batch)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write_batch(batch)
+ except pa.ArrowInvalid:
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
+ col0 = next(iter(batch))
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
+ error_msg = (
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
+ )
+            elif sorted(self.features) != ["predictions", "references"]:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ else:
+ error_msg = (
+ f"Predictions and/or references don't match the expected format.\n"
+ f"Expected format: {self.features},\n"
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
+ f"Input references: {summarize_if_long_list(references)}"
+ )
+ raise ValueError(error_msg) from None
+
+ def add(self, *, prediction=None, reference=None, **kwargs):
+ """Add one prediction and reference for the metric's stack.
+
+ Args:
+            prediction (list/array/tensor, optional): Prediction.
+            reference (list/array/tensor, optional): Reference.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+        >>> metric.add(prediction=model_prediction, reference=label)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ example = {"predictions": prediction, "references": reference, **kwargs}
+        example = {input_name: example[input_name] for input_name in self.features}
+ example = self.info.features.encode_example(example)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write(example)
+ except pa.ArrowInvalid:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ raise ValueError(error_msg) from None
+
+ def _init_writer(self, timeout=1):
+ if self.num_process > 1:
+ if self.process_id == 0:
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ self.rendez_vous_lock = FileLock(file_path)
+ try:
+ self.rendez_vous_lock.acquire(timeout=timeout)
+                except Timeout:
+ raise ValueError(
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+
+ if self.keep_in_memory:
+ self.buf_writer = pa.BufferOutputStream()
+ self.writer = ArrowWriter(
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
+ )
+ else:
+ self.buf_writer = None
+
+ # Get cache file name and lock it
+ if self.cache_file_name is None or self.filelock is None:
+ cache_file_name, filelock = self._create_cache_file() # get ready
+ self.cache_file_name = cache_file_name
+ self.filelock = filelock
+
+ self.writer = ArrowWriter(
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
+ )
+        # Setup rendez-vous here if we are in a distributed setting
+ if self.num_process > 1:
+ if self.process_id == 0:
+ self._check_all_processes_locks() # wait for everyone to be ready
+ self.rendez_vous_lock.release() # let everyone go
+ else:
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
+
+ def _info(self) -> MetricInfo:
+ """Construct the MetricInfo object. See `MetricInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (MetricInfo) The metrics information
+ """
+ raise NotImplementedError
+
+ def download_and_prepare(
+ self,
+ download_config: Optional[DownloadConfig] = None,
+ dl_manager: Optional[DownloadManager] = None,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
+ """
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig()
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
+ download_config.force_download = False
+
+ dl_manager = DownloadManager(
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
+ )
+
+ self._download_and_prepare(dl_manager)
+
+ def _download_and_prepare(self, dl_manager):
+ """Downloads and prepares resources for the metric.
+
+ This is the internal implementation to overwrite called when user calls
+ `download_and_prepare`. It should download all required resources for the metric.
+
+ Args:
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
+ """
+ return None
+
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
+ """This method defines the common API for all the metrics in the library"""
+ raise NotImplementedError
+
+ def __del__(self):
+ if hasattr(self, "filelock") and self.filelock is not None:
+ self.filelock.release()
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
+ self.rendez_vous_lock.release()
+ if hasattr(self, "writer"): # in case it was already deleted
+ del self.writer
+ if hasattr(self, "data"): # in case it was already deleted
+ del self.data
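+
+
+# Illustrative usage sketch (not part of the library): in a distributed setting every process writes its
+# own predictions to the shared cache and only the main process gathers them and returns the result.
+# `world_size`, `rank`, `preds` and `refs` below are placeholders.
+# ```
+# from datasets import load_metric
+#
+# metric = load_metric("accuracy", num_process=world_size, process_id=rank, experiment_id="my_run")
+# metric.add_batch(predictions=preds, references=refs)
+# score = metric.compute()  # dict on the main process (process_id == 0), None on the others
+# ```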
diff --git a/venv/lib/python3.10/site-packages/datasets/naming.py b/venv/lib/python3.10/site-packages/datasets/naming.py
new file mode 100644
index 0000000000000000000000000000000000000000..65e7ede10dcde8701823223ae98e7971f705f945
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/naming.py
@@ -0,0 +1,84 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Utilities for file names."""
+
+import itertools
+import os
+import re
+
+
+_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
+_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
+
+_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
+_multiple_underscores_re = re.compile(r"(_{2,})")
+
+_split_re = r"^\w+(\.\w+)*$"
+
+INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
+
+
+def camelcase_to_snakecase(name):
+ """Convert camel-case string to snake-case."""
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
+ return name.lower()
+
+
+def snakecase_to_camelcase(name):
+ """Convert snake-case string to camel-case string."""
+ name = _single_underscore_re.split(name)
+ name = [_multiple_underscores_re.split(n) for n in name]
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
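+
+
+# For illustration (not part of the library): the two helpers above are inverses for simple names, e.g.
+# camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
+# snakecase_to_camelcase("some_dataset_name") == "SomeDatasetName"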
+
+
+def filename_prefix_for_name(name):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ return camelcase_to_snakecase(name)
+
+
+def filename_prefix_for_split(name, split):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ if not re.match(_split_re, split):
+        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+ return f"{filename_prefix_for_name(name)}-{split}"
+
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ if filetype_suffix:
+ prefix += f".{filetype_suffix}"
+ filepath = os.path.join(data_dir, prefix)
+ return f"{filepath}*"
+
+
+def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ prefix = os.path.join(path, prefix)
+
+ if shard_lengths:
+ num_shards = len(shard_lengths)
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
+ if filetype_suffix:
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
+ return filenames
+ else:
+ filename = prefix
+ if filetype_suffix:
+ filename += f".{filetype_suffix}"
+ return [filename]
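+
+
+# For illustration (not part of the library); the cache path and dataset name are hypothetical:
+# filenames_for_dataset_split("/cache", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
+# == ["/cache/squad-train-00000-of-00002.arrow", "/cache/squad-train-00001-of-00002.arrow"]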
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf1408eaf91901a0dcf886ef9d085b79fb422b49
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/__init__.py
@@ -0,0 +1,71 @@
+import inspect
+import re
+from typing import Dict, List, Tuple
+
+from huggingface_hub.utils import insecure_hashlib
+
+from .arrow import arrow
+from .audiofolder import audiofolder
+from .cache import cache # noqa F401
+from .csv import csv
+from .imagefolder import imagefolder
+from .json import json
+from .pandas import pandas
+from .parquet import parquet
+from .sql import sql # noqa F401
+from .text import text
+from .webdataset import webdataset
+
+
+def _hash_python_lines(lines: List[str]) -> str:
+ filtered_lines = []
+ for line in lines:
+ line = re.sub(r"#.*", "", line) # remove comments
+ if line:
+ filtered_lines.append(line)
+ full_str = "\n".join(filtered_lines)
+
+ # Make a hash from all this code
+ full_bytes = full_str.encode("utf-8")
+ return insecure_hashlib.sha256(full_bytes).hexdigest()
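+
+
+# For illustration (not part of the library): comments and empty lines do not change the hash, e.g.
+# _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])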
+
+
+# get importable module names and hash for caching
+_PACKAGED_DATASETS_MODULES = {
+ "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
+ "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
+ "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
+ "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
+ "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
+ "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
+ "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
+ "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
+ "webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
+}
+
+# Used to infer the module to use based on the data files extensions
+_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
+ ".csv": ("csv", {}),
+ ".tsv": ("csv", {"sep": "\t"}),
+ ".json": ("json", {}),
+ ".jsonl": ("json", {}),
+ ".parquet": ("parquet", {}),
+ ".geoparquet": ("parquet", {}),
+ ".gpq": ("parquet", {}),
+ ".arrow": ("arrow", {}),
+ ".txt": ("text", {}),
+ ".tar": ("webdataset", {}),
+}
+_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
+_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
+_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
+
+# Used to filter data files based on extensions given a module name
+_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
+for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
+ _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
+
+for _module in _MODULE_TO_EXTENSIONS:
+ _MODULE_TO_EXTENSIONS[_module].append(".zip")
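+
+# For illustration (not part of the library): after the loops above, e.g.
+# _MODULE_TO_EXTENSIONS["csv"] == [".csv", ".tsv", ".zip"]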
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1775a361710bba5e876af8154444f193d9174c27
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4eb93e88e7154d406c3cd0f88f5df405d1f92197
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/__pycache__/arrow.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd1ecbf12da4a1541115500d38d311346fc161d5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py
@@ -0,0 +1,74 @@
+import itertools
+from dataclasses import dataclass
+from typing import Optional
+
+import pyarrow as pa
+
+import datasets
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class ArrowConfig(datasets.BuilderConfig):
+ """BuilderConfig for Arrow."""
+
+ features: Optional[datasets.Features] = None
+
+
+class Arrow(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = ArrowConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+            # Infer features if they are stored in the arrow schema
+ if self.info.features is None:
+ for file in itertools.chain.from_iterable(files):
+ with open(file, "rb") as f:
+ self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
+ break
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.info.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ try:
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
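+
+
+# Illustrative usage sketch (not part of the library); the file path is hypothetical.
+# When no features are passed, they are inferred from the Arrow schema of the first file:
+# ```
+# from datasets import load_dataset
+# ds = load_dataset("arrow", data_files={"train": "path/to/data.arrow"})
+# ```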
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4d95187870f820410feafbe8ab219684d149c0c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03caf9c82f9811b5958f106d10a01fcd7b27148c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..51044143039e98af0f9fd7d1ecdf1cab229e58a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/audiofolder.py
@@ -0,0 +1,68 @@
+from typing import List
+
+import datasets
+from datasets.tasks import AudioClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+ """Builder Config for AudioFolder."""
+
+ drop_labels: bool = None
+ drop_metadata: bool = None
+
+
+class AudioFolder(folder_based_builder.FolderBasedBuilder):
+ BASE_FEATURE = datasets.Audio
+ BASE_COLUMN_NAME = "audio"
+ BUILDER_CONFIG_CLASS = AudioFolderConfig
+ EXTENSIONS: List[str] # definition at the bottom of the script
+ CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in the global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+ ".aiff",
+ ".au",
+ ".avr",
+ ".caf",
+ ".flac",
+ ".htk",
+ ".svx",
+ ".mat4",
+ ".mat5",
+ ".mpc2k",
+ ".ogg",
+ ".paf",
+ ".pvf",
+ ".raw",
+ ".rf64",
+ ".sd2",
+ ".sds",
+ ".ircam",
+ ".voc",
+ ".w64",
+ ".wav",
+ ".nist",
+ ".wavex",
+ ".wve",
+ ".xi",
+ ".mp3",
+ ".opus",
+]
+AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..465f412c331dfa34c643aab4f520caa0980a5aca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b60fc672e00538cb07fc836f9e5f78a52a4019e9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..1efa721b159668a72d29f5afa38c36bcaff084ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py
@@ -0,0 +1,31 @@
+from dataclasses import dataclass
+from typing import Callable, Optional
+
+import datasets
+
+
+@dataclass
+class GeneratorConfig(datasets.BuilderConfig):
+ generator: Optional[Callable] = None
+ gen_kwargs: Optional[dict] = None
+ features: Optional[datasets.Features] = None
+
+ def __post_init__(self):
+ assert self.generator is not None, "generator must be specified"
+
+ if self.gen_kwargs is None:
+ self.gen_kwargs = {}
+
+
+class Generator(datasets.GeneratorBasedBuilder):
+ BUILDER_CONFIG_CLASS = GeneratorConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
+
+ def _generate_examples(self, **gen_kwargs):
+ for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
+ yield idx, ex
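+
+
+# Illustrative usage sketch (not part of the library): this builder backs `Dataset.from_generator`, e.g.
+# ```
+# from datasets import Dataset
+#
+# def gen():
+#     for i in range(3):
+#         yield {"id": i}
+#
+# ds = Dataset.from_generator(gen)
+# ```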
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24bffe7a68037a887983213003042cd3e9324b3d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..679b61a8fdb436f20ca64eeff031b60e0e0f5335
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd2dd0d419a626dbb5149cb56abf69c82d35deb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py
@@ -0,0 +1,104 @@
+from typing import List
+
+import datasets
+from datasets.tasks import ImageClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+ """BuilderConfig for ImageFolder."""
+
+ drop_labels: bool = None
+ drop_metadata: bool = None
+
+
+class ImageFolder(folder_based_builder.FolderBasedBuilder):
+ BASE_FEATURE = datasets.Image
+ BASE_COLUMN_NAME = "image"
+ BUILDER_CONFIG_CLASS = ImageFolderConfig
+ EXTENSIONS: List[str] # definition at the bottom of the script
+ CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
+
+
+# Obtained with:
+# ```
+# import PIL.Image
+# IMAGE_EXTENSIONS = []
+# PIL.Image.init()
+# for ext, format in PIL.Image.EXTENSION.items():
+# if format in PIL.Image.OPEN:
+#         IMAGE_EXTENSIONS.append(ext)
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Pillow is an optional dependency, so importing Pillow in the global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+IMAGE_EXTENSIONS = [
+ ".blp",
+ ".bmp",
+ ".dib",
+ ".bufr",
+ ".cur",
+ ".pcx",
+ ".dcx",
+ ".dds",
+ ".ps",
+ ".eps",
+ ".fit",
+ ".fits",
+ ".fli",
+ ".flc",
+ ".ftc",
+ ".ftu",
+ ".gbr",
+ ".gif",
+ ".grib",
+ ".h5",
+ ".hdf",
+ ".png",
+ ".apng",
+ ".jp2",
+ ".j2k",
+ ".jpc",
+ ".jpf",
+ ".jpx",
+ ".j2c",
+ ".icns",
+ ".ico",
+ ".im",
+ ".iim",
+ ".tif",
+ ".tiff",
+ ".jfif",
+ ".jpe",
+ ".jpg",
+ ".jpeg",
+ ".mpg",
+ ".mpeg",
+ ".msp",
+ ".pcd",
+ ".pxr",
+ ".pbm",
+ ".pgm",
+ ".ppm",
+ ".pnm",
+ ".psd",
+ ".bw",
+ ".rgb",
+ ".rgba",
+ ".sgi",
+ ".ras",
+ ".tga",
+ ".icb",
+ ".vda",
+ ".vst",
+ ".webp",
+ ".wmf",
+ ".emf",
+ ".xbm",
+ ".xpm",
+]
+ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
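+
+
+# Illustrative usage sketch (not part of the library); the directory layout is hypothetical.
+# With `folder/train/cat/1.png`, `folder/train/dog/2.png`, the class directories become a "label" column:
+# ```
+# from datasets import load_dataset
+# ds = load_dataset("imagefolder", data_dir="folder")
+# ```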
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f78810507c4035f0bdd07dd029af3eabc4befe1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e01171398e254e4a3250321918088ecf6ce9e8d5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py
new file mode 100644
index 0000000000000000000000000000000000000000..c17f389945e0fa55959e220e0b892cd7b3e8925d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py
@@ -0,0 +1,62 @@
+import itertools
+import warnings
+from dataclasses import dataclass
+from typing import Optional
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+from datasets.table import table_cast
+
+
+@dataclass
+class PandasConfig(datasets.BuilderConfig):
+ """BuilderConfig for Pandas."""
+
+ features: Optional[datasets.Features] = None
+
+
+class Pandas(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = PandasConfig
+
+ def _info(self):
+ warnings.warn(
+ "The Pandas builder is deprecated and will be removed in the next major version of datasets.",
+ FutureWarning,
+ )
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.config.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ for i, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ pa_table = pa.Table.from_pandas(pd.read_pickle(f))
+ yield i, self._cast_table(pa_table)
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b9c301bd462e4d9afa0b8b5ffac1009118e4ada
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11536d0e7038d7626f82c9cbb016169afbdcbd60
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/parquet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..399a2609f7e7012d84c72fb3c2a2662a28d70c22
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
@@ -0,0 +1,100 @@
+import itertools
+from dataclasses import dataclass
+from typing import List, Optional
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+import datasets
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class ParquetConfig(datasets.BuilderConfig):
+ """BuilderConfig for Parquet."""
+
+ batch_size: Optional[int] = None
+ columns: Optional[List[str]] = None
+ features: Optional[datasets.Features] = None
+
+
+class Parquet(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = ParquetConfig
+
+ def _info(self):
+ if (
+ self.config.columns is not None
+ and self.config.features is not None
+ and set(self.config.columns) != set(self.config.features)
+ ):
+ raise ValueError(
+                "The columns and features argument must contain the same columns, but got "
+ f"{self.config.columns} and {self.config.features}",
+ )
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ # Infer features if they are stored in the arrow schema
+ if self.info.features is None:
+ for file in itertools.chain.from_iterable(files):
+ with open(file, "rb") as f:
+ self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
+ break
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
+ self.info.features = datasets.Features(
+ {col: feat for col, feat in self.info.features.items() if col in self.config.columns}
+ )
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.info.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ if self.config.features is not None and self.config.columns is not None:
+ if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
+ raise ValueError(
+ f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
+ )
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ parquet_file = pq.ParquetFile(f)
+ if parquet_file.metadata.num_row_groups > 0:
+ batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
+ try:
+ for batch_idx, record_batch in enumerate(
+ parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
+ ):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
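+
+
+# Illustrative usage sketch (not part of the library); the file pattern is hypothetical.
+# `columns` restricts both the loaded features and the columns read from the parquet files:
+# ```
+# from datasets import load_dataset
+# ds = load_dataset("parquet", data_files={"train": "path/to/*.parquet"}, columns=["text"])
+# ```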
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49e5e054baae4c5de891ed3228caeb25a9134117
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e229b55a8a216569e97920a5db08df13cb2dac81
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..47e07a0e4b35c9fa2af53c1a6455ac61e00ddf29
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py
@@ -0,0 +1,129 @@
+import itertools
+import warnings
+from dataclasses import InitVar, dataclass
+from io import StringIO
+from typing import Optional
+
+import pyarrow as pa
+
+import datasets
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class TextConfig(datasets.BuilderConfig):
+ """BuilderConfig for text files."""
+
+ features: Optional[datasets.Features] = None
+ encoding: str = "utf-8"
+ errors: InitVar[Optional[str]] = "deprecated"
+ encoding_errors: Optional[str] = None
+ chunksize: int = 10 << 20 # 10MB
+ keep_linebreaks: bool = False
+ sample_by: str = "line"
+
+ def __post_init__(self, errors):
+ if errors != "deprecated":
+ warnings.warn(
+ "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'encoding_errors={errors}' instead.",
+ FutureWarning,
+ )
+ self.encoding_errors = errors
+
+
+class Text(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = TextConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
+
+ If str or List[str], then the dataset returns only the 'train' split.
+ If dict, then keys should be from the `datasets.Split` enum.
+ """
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ schema = self.config.features.arrow_schema
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+ # cheaper cast
+ pa_table = pa_table.cast(schema)
+ else:
+ # more expensive cast; allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, schema)
+ return pa_table
+ else:
+ return pa_table.cast(pa.schema({"text": pa.string()}))
+
+ def _generate_tables(self, files):
+ pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
+ with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
+ if self.config.sample_by == "line":
+ batch_idx = 0
+ while True:
+ batch = f.read(self.config.chunksize)
+ if not batch:
+ break
+ batch += f.readline() # finish current line
+ # StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
+ batch = StringIO(batch).readlines()
+ if not self.config.keep_linebreaks:
+ batch = [line.rstrip("\n") for line in batch]
+ pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ batch_idx += 1
+ elif self.config.sample_by == "paragraph":
+ batch_idx = 0
+ batch = ""
+ while True:
+ new_batch = f.read(self.config.chunksize)
+ if not new_batch:
+ break
+ batch += new_batch
+ batch += f.readline() # finish current line
+ batch = batch.split("\n\n")
+ pa_table = pa.Table.from_arrays(
+ [pa.array([example for example in batch[:-1] if example])], names=pa_table_names
+ )
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ batch_idx += 1
+ batch = batch[-1]
+ if batch:
+ pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ elif self.config.sample_by == "document":
+ text = f.read()
+ pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
+ yield file_idx, self._cast_table(pa_table)
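+
+
+# Illustrative usage sketch (not part of the library); the file path is hypothetical.
+# `sample_by` controls whether each example is a line (default), a paragraph, or a whole document:
+# ```
+# from datasets import load_dataset
+# ds = load_dataset("text", data_files="path/to/file.txt", sample_by="paragraph")
+# ```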
diff --git a/venv/lib/python3.10/site-packages/datasets/search.py b/venv/lib/python3.10/site-packages/datasets/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f76f9b671fda755b4b7a53822edf7c6f73b56aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/search.py
@@ -0,0 +1,785 @@
+import importlib.util
+import os
+import tempfile
+from pathlib import PurePath
+from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union
+
+import fsspec
+import numpy as np
+
+from .features import Sequence
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+
+
+if TYPE_CHECKING:
+ from .arrow_dataset import Dataset # noqa: F401
+
+ try:
+ from elasticsearch import Elasticsearch # noqa: F401
+
+ except ImportError:
+ pass
+ try:
+ import faiss # noqa: F401
+
+ except ImportError:
+ pass
+
+_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None
+_has_faiss = importlib.util.find_spec("faiss") is not None
+
+
+logger = logging.get_logger(__name__)
+
+
+class MissingIndex(Exception):
+ pass
+
+
+class SearchResults(NamedTuple):
+ scores: List[float]
+ indices: List[int]
+
+
+class BatchedSearchResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_indices: List[List[int]]
+
+
+class NearestExamplesResults(NamedTuple):
+ scores: List[float]
+ examples: dict
+
+
+class BatchedNearestExamplesResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_examples: List[dict]
+
+
+class BaseIndex:
+ """Base class for indexing"""
+
+ def search(self, query, k: int = 10, **kwargs) -> SearchResults:
+ """
+ To implement.
+ This method has to return the scores and the indices of the retrieved examples given a certain query.
+ """
+ raise NotImplementedError
+
+ def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
+ k (`int`): The number of examples to retrieve per query.
+
+        Output:
+            total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ total_scores, total_indices = [], []
+ for query in queries:
+ scores, indices = self.search(query, k)
+ total_scores.append(scores)
+ total_indices.append(indices)
+ return BatchedSearchResults(total_scores, total_indices)
+
+ def save(self, file: Union[str, PurePath]):
+ """Serialize the index on disk"""
+ raise NotImplementedError
+
+ @classmethod
+ def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
+ """Deserialize the index from disk"""
+ raise NotImplementedError
+
+
+class ElasticSearchIndex(BaseIndex):
+ """
+ Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
+ An Elasticsearch server needs to be accessible, and a python client is declared with
+ ```
+ es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
+ ```
+ for example.
+ """
+
+ def __init__(
+ self,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ if not _has_elasticsearch:
+ raise ImportError(
+                "You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1` for example"
+ )
+ if es_client is not None and (host is not None or port is not None):
+ raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
+ host = host or "localhost"
+ port = port or 9200
+
+ import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
+ from elasticsearch import Elasticsearch # noqa: F811
+
+ self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
+ self.es_index_name = (
+ es_index_name
+ if es_index_name is not None
+ else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
+ )
+ self.es_index_config = (
+ es_index_config
+ if es_index_config is not None
+ else {
+ "settings": {
+ "number_of_shards": 1,
+                    "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}},
+ },
+ "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
+ }
+ )
+
+ def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
+ """
+ Add documents to the index.
+ If the documents are inside a certain column, you can specify it using the `column` argument.
+ """
+ index_name = self.es_index_name
+ index_config = self.es_index_config
+ self.es_client.indices.create(index=index_name, body=index_config)
+ number_of_docs = len(documents)
+ progress = hf_tqdm(unit="docs", total=number_of_docs)
+ successes = 0
+
+ def passage_generator():
+ if column is not None:
+ for i, example in enumerate(documents):
+ yield {"text": example[column], "_id": i}
+ else:
+ for i, example in enumerate(documents):
+ yield {"text": example, "_id": i}
+
+ # create the ES index
+ import elasticsearch as es
+
+ for ok, action in es.helpers.streaming_bulk(
+ client=self.es_client,
+ index=index_name,
+ actions=passage_generator(),
+ ):
+ progress.update(1)
+ successes += ok
+ if successes != len(documents):
+ logger.warning(
+ f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}"
+ )
+ logger.info(f"Indexed {successes:d} documents")
+
+ def search(self, query: str, k=10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ query (`str`): The query as a string.
+ k (`int`): The number of examples to retrieve.
+
+        Output:
+            scores (`List[float]`): The retrieval scores of the retrieved examples.
+            indices (`List[int]`): The indices of the retrieved examples.
+ """
+ response = self.es_client.search(
+ index=self.es_index_name,
+ body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
+ **kwargs,
+ )
+ hits = response["hits"]["hits"]
+ return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
+
+ def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
+ import concurrent.futures
+
+ total_scores, total_indices = [None] * len(queries), [None] * len(queries)
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
+ for future in concurrent.futures.as_completed(future_to_index):
+ index = future_to_index[future]
+ results: SearchResults = future.result()
+ total_scores[index] = results.scores
+ total_indices[index] = results.indices
+ return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
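+
+    # Illustrative usage sketch (not part of the library); it requires a reachable Elasticsearch server:
+    # ```
+    # index = ElasticSearchIndex(host="localhost", port=9200)
+    # index.add_documents(["the first document", "the second document"])
+    # scores, ids = index.search("first document", k=1)
+    # ```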
+
+
+class FaissIndex(BaseIndex):
+ """
+ Dense index using Faiss. It is used to index vectors.
+ Faiss is a library for efficient similarity search and clustering of dense vectors.
+ It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
+ You can find more information about Faiss here:
+ - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
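+
+ Example (an illustrative sketch of the typical flow, assuming faiss is installed; the vectors here are random placeholders):
+
+ ```py
+ import numpy as np
+ index = FaissIndex()  # flat (exact) CPU index by default
+ index.add_vectors(np.random.rand(100, 8).astype(np.float32))
+ scores, indices = index.search(np.random.rand(8).astype(np.float32), k=5)
+ ```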
+ """
+
+ def __init__(
+ self,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ ):
+ """
+ Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ """
+ if string_factory is not None and custom_index is not None:
+ raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
+ if device is not None and custom_index is not None:
+ raise ValueError(
+ "Cannot pass both 'custom_index' and 'device'. "
+ "Pass 'custom_index' already transferred to the target device instead."
+ )
+ self.device = device
+ self.string_factory = string_factory
+ self.metric_type = metric_type
+ self.faiss_index = custom_index
+ if not _has_faiss:
+ raise ImportError(
+ "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
+ "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
+ "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
+ )
+
+ def add_vectors(
+ self,
+ vectors: Union[np.array, "Dataset"],
+ column: Optional[str] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: Optional[bool] = None,
+ ):
+ """
+ Add vectors to the index.
+ If the arrays are inside a certain column, you can specify it using the `column` argument.
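+
+ For example (an illustrative sketch; `index` is a `FaissIndex` and `ds` is assumed to be a `Dataset` whose "embeddings" column holds fixed-size float32 arrays):
+
+ ```py
+ index.add_vectors(ds, column="embeddings", batch_size=1000)
+ ```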
+ """
+ import faiss # noqa: F811
+
+ if column and not isinstance(vectors.features[column], Sequence):
+ raise ValueError(
+ f"Wrong feature type for column '{column}'. Expected 1d array, got {vectors.features[column]}"
+ )
+
+ # Create index
+ if self.faiss_index is None:
+ size = len(vectors[0]) if column is None else len(vectors[0][column])
+ if self.string_factory is not None:
+ if self.metric_type is None:
+ index = faiss.index_factory(size, self.string_factory)
+ else:
+ index = faiss.index_factory(size, self.string_factory, self.metric_type)
+ else:
+ if self.metric_type is None:
+ index = faiss.IndexFlat(size)
+ else:
+ index = faiss.IndexFlat(size, self.metric_type)
+
+ self.faiss_index = self._faiss_index_to_device(index, self.device)
+ logger.info(f"Created faiss index of type {type(self.faiss_index)}")
+
+ # Set verbosity level
+ if faiss_verbose is not None:
+ self.faiss_index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
+ self.faiss_index.index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
+ self.faiss_index.quantizer.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
+ self.faiss_index.clustering_index.verbose = faiss_verbose
+
+ # Train
+ if train_size is not None:
+ train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
+ logger.info(f"Training the index with the first {len(train_vecs)} vectors")
+ self.faiss_index.train(train_vecs)
+ else:
+ logger.info("Ignored the training step of the faiss index as `train_size` is None.")
+
+ # Add vectors
+ logger.info(f"Adding {len(vectors)} vectors to the faiss index")
+ for i in hf_tqdm(range(0, len(vectors), batch_size)):
+ vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
+ self.faiss_index.add(vecs)
+
+ @staticmethod
+ def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
+ """
+ Sends a faiss index to a device.
+ A device can either be a positive integer (GPU id), a negative integer (all GPUs),
+ or a list of positive integers (select GPUs to use), or `None` for CPU.
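+
+ For example (an illustrative sketch, assuming a GPU-enabled faiss build; the numbers are placeholder GPU ids):
+
+ ```py
+ index = FaissIndex._faiss_index_to_device(index, device=0)       # move to GPU 0
+ index = FaissIndex._faiss_index_to_device(index, device=-1)      # spread across all GPUs
+ index = FaissIndex._faiss_index_to_device(index, device=[0, 1])  # use GPUs 0 and 1
+ ```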
+ """
+
+ # If device is not specified, then it runs on CPU.
+ if device is None:
+ return index
+
+ import faiss # noqa: F811
+
+ # If the device id is given as an integer
+ if isinstance(device, int):
+ # Positive integers are directly mapped to GPU ids
+ if device > -1:
+ faiss_res = faiss.StandardGpuResources()
+ index = faiss.index_cpu_to_gpu(faiss_res, device, index)
+ # And negative integers mean using all GPUs
+ else:
+ index = faiss.index_cpu_to_all_gpus(index)
+ # Device ids given as a list mean mapping to those devices specified.
+ elif isinstance(device, (list, tuple)):
+ index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
+ else:
+ raise TypeError(
+ f"The argument type: {type(device)} is not expected. "
+ + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
+ )
+
+ return index
+
+ def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ query (`np.array`): The query as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ scores (`List[float]`): The retrieval scores of the retrieved examples.
+ indices (`List[int]`): The indices of the retrieved examples.
+ """
+ if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
+ raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
+
+ queries = query.reshape(1, -1)
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return SearchResults(scores[0], indices[0].astype(int))
+
+ def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
+ """Find the nearest examples indices to the queries.
+
+ Args:
+ queries (`np.array`): The queries as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ if len(queries.shape) != 2:
+ raise ValueError("Shape of query must be 2D")
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return BatchedSearchResults(scores, indices.astype(int))
+
+ def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Serialize the FaissIndex on disk"""
+ import faiss # noqa: F811
+
+ if self.device is not None and isinstance(self.device, (int, list, tuple)):
+ index = faiss.index_gpu_to_cpu(self.faiss_index)
+ else:
+ index = self.faiss_index
+
+ with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
+ faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
+
+ @classmethod
+ def load(
+ cls,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ) -> "FaissIndex":
+ """Deserialize the FaissIndex from disk"""
+ import faiss # noqa: F811
+
+ # Instances of FaissIndex are essentially just wrappers around faiss indices.
+ faiss_index = cls(device=device)
+ with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
+ index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
+ faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
+ return faiss_index
+
+
+class IndexableMixin:
+ """Add indexing features to `datasets.Dataset`"""
+
+ def __init__(self):
+ self._indexes: Dict[str, BaseIndex] = {}
+
+ def __len__(self):
+ raise NotImplementedError
+
+ def __getitem__(self, key):
+ raise NotImplementedError
+
+ def is_index_initialized(self, index_name: str) -> bool:
+ return index_name in self._indexes
+
+ def _check_index_is_initialized(self, index_name: str):
+ if not self.is_index_initialized(index_name):
+ raise MissingIndex(
+ f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
+ )
+
+ def list_indexes(self) -> List[str]:
+ """List the `colindex_nameumns`/identifiers of all the attached indexes."""
+ return list(self._indexes)
+
+ def get_index(self, index_name: str) -> BaseIndex:
+ """List the `index_name`/identifiers of all the attached indexes.
+
+ Args:
+ index_name (`str`): Index name.
+
+ Returns:
+ [`BaseIndex`]
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name]
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ column (`str`): The column of the vectors to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat` (L2 metric by default).
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
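+
+ Example (an illustrative sketch; `ds` is assumed to be a `Dataset` with a "text" column and `embed` a hypothetical function mapping text to a 1D float32 NumPy array):
+
+ ```py
+ ds_with_embeddings = ds.map(lambda ex: {"embeddings": embed(ex["text"])})
+ ds_with_embeddings.add_faiss_index(column="embeddings")
+ scores, examples = ds_with_embeddings.get_nearest_examples("embeddings", embed("my query"), k=10)
+ ```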
+ """
+ index_name = index_name if index_name is not None else column
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat` (L2 metric by default).
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
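+
+ Example (an illustrative sketch; `ds` is an existing `Dataset`, and the random vectors are placeholders, one per dataset example):
+
+ ```py
+ import numpy as np
+ arrays = np.random.rand(len(ds), 64).astype(np.float32)
+ ds.add_faiss_index_from_external_arrays(external_arrays=arrays, index_name="random_vectors")
+ scores, examples = ds.get_nearest_examples("random_vectors", np.random.rand(64).astype(np.float32), k=5)
+ ```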
+ """
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Save a FaissIndex on disk.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
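+
+ Example (an illustrative sketch; `ds` is the indexed `Dataset` and a faiss index named "embeddings" was added with `add_faiss_index`):
+
+ ```py
+ ds.save_faiss_index("embeddings", "my_index.faiss")
+ # later, reload it with:
+ ds.load_faiss_index("embeddings", "my_index.faiss")
+ ```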
+ """
+ index = self.get_index(index_name)
+ if not isinstance(index, FaissIndex):
+ raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
+ index.save(file, storage_options=storage_options)
+ logger.info(f"Saved FaissIndex {index_name} at {file}")
+
+ def load_faiss_index(
+ self,
+ index_name: str,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ):
+ """Load a FaissIndex from disk.
+
+ If you want to do additional configurations, you can have access to the faiss index object by doing
+ `.get_index(index_name).faiss_index` to make it fit your needs.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
+ call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+ """
+ index = FaissIndex.load(file, device=device, storage_options=storage_options)
+ if index.faiss_index.ntotal != len(self):
+ raise ValueError(
+ f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
+ )
+ self._indexes[index_name] = index
+ logger.info(f"Loaded FaissIndex {index_name} from {file}")
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval.
+
+ Args:
+ column (`str`): The column of the documents to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ host (Optional `str`, defaults to `localhost`):
+ Host where ElasticSearch is running.
+ port (Optional `int`, defaults to `9200`):
+ Port where ElasticSearch is running.
+ es_client (Optional `elasticsearch.Elasticsearch`):
+ The elasticsearch client used to create the index if host and port are None.
+ es_index_name (Optional `str`): The elasticsearch index name used to create the index.
+ es_index_config (Optional `dict`):
+ The configuration of the elasticsearch index.
+ Default config is:
+
+ Config::
+
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
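+
+ Example (an illustrative sketch; `ds` is assumed to be a `Dataset` with a "context" text column and an Elasticsearch server is running locally):
+
+ ```py
+ ds.add_elasticsearch_index(column="context", host="localhost", port=9200)
+ scores, examples = ds.get_nearest_examples("context", "my query", k=10)
+ ```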
+ """
+ index_name = index_name if index_name is not None else column
+ es_index = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+ es_index.add_documents(self, column=column)
+ self._indexes[index_name] = es_index
+
+ def load_elasticsearch_index(
+ self,
+ index_name: str,
+ es_index_name: str,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Load an existing text index using ElasticSearch for fast retrieval.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
+ es_index_name (`str`):
+ The name of elasticsearch index to load.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ """
+ self._indexes[index_name] = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+
+ def drop_index(self, index_name: str):
+ """Drop the index with the specified column.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ """
+ del self._indexes[index_name]
+
+ def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, indices)`:
+ A tuple of `(scores, indices)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **indices** (`List[int]`): the indices of the retrieved examples
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search(query, k, **kwargs)
+
+ def search_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedSearchResults:
+ """Find the nearest examples indices in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_indices)`:
+ A tuple of `(total_scores, total_indices)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search_batch(queries, k, **kwargs)
+
+ def get_nearest_examples(
+ self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
+ ) -> NearestExamplesResults:
+ """Find the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The index_name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, examples)`:
+ A tuple of `(scores, examples)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **examples** (`dict`): the retrieved examples
+ """
+ self._check_index_is_initialized(index_name)
+ scores, indices = self.search(index_name, query, k, **kwargs)
+ top_indices = [i for i in indices if i >= 0]
+ return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
+
+ def get_nearest_examples_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedNearestExamplesResults:
+ """Find the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_examples)`:
+ A tuple of `(total_scores, total_examples)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_examples** (`List[dict]`): the retrieved examples per query
+ """
+ self._check_index_is_initialized(index_name)
+ total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
+ total_scores = [
+ scores_i[: len([i for i in indices_i if i >= 0])]
+ for scores_i, indices_i in zip(total_scores, total_indices)
+ ]
+ total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
+ return BatchedNearestExamplesResults(total_scores, total_samples)
diff --git a/venv/lib/python3.10/site-packages/datasets/splits.py b/venv/lib/python3.10/site-packages/datasets/splits.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd4966cb4007adc9f47fd78cf2b0a1732913aaef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/splits.py
@@ -0,0 +1,635 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Splits related API."""
+
+import abc
+import collections
+import copy
+import dataclasses
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Union
+
+from .arrow_reader import FileInstructions, make_file_instructions
+from .naming import _split_re
+from .utils.py_utils import NonMutableDict, asdict
+
+
+@dataclass
+class SplitInfo:
+ name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
+ num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ shard_lengths: Optional[List[int]] = None
+
+ # Deprecated
+ # For backward compatibility, this field needs to always be included in files like
+ # dataset_infos.json and dataset_info.json files
+ # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
+ dataset_name: Optional[str] = dataclasses.field(
+ default=None, metadata={"include_in_asdict_even_if_is_default": True}
+ )
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ # `self.dataset_name` is assigned in `SplitDict.add()`.
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=[self],
+ instruction=str(self.name),
+ )
+ return instructions.file_instructions
+
+
+@dataclass
+class SubSplitInfo:
+ """Wrapper around a sub split info.
+ This class exposes info on the subsplit:
+ ```
+ ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
+ info.splits['train[75%:]'].num_examples
+ ```
+ """
+
+ instructions: FileInstructions
+
+ @property
+ def num_examples(self):
+ """Returns the number of example in the subsplit."""
+ return self.instructions.num_examples
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ return self.instructions.file_instructions
+
+
+class SplitBase(metaclass=abc.ABCMeta):
+ # pylint: disable=line-too-long
+ """Abstract base class for Split compositionality.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+
+ There are three parts to the composition:
+ 1) The splits are composed (defined, merged, split,...) together before
+ calling the `.as_dataset()` function. This is done with `__add__` and
+ `__getitem__`, which return a tree of `SplitBase` (whose leaves
+ are the `NamedSplit` objects).
+
+ ```
+ split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
+ ```
+
+ 2) The `SplitBase` is forwarded to the `.as_dataset()` function
+ to be resolved into actual read instructions. This is done by the
+ `.get_read_instruction()` method, which takes the real dataset splits
+ (name, number of shards,...) and parses the tree to return a
+ `SplitReadInstruction()` object.
+
+ ```
+ read_instruction = split.get_read_instruction(self.info.splits)
+ ```
+
+ 3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
+ to define which files to read and how to skip examples within each file.
+
+ """
+
+ # pylint: enable=line-too-long
+
+ @abc.abstractmethod
+ def get_read_instruction(self, split_dict):
+ """Parse the descriptor tree and compile all read instructions together.
+
+ Args:
+ split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
+
+ Returns:
+ split_read_instruction: `SplitReadInstruction`
+ """
+ raise NotImplementedError("Abstract method")
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, (NamedSplit, str)):
+ return False
+ raise NotImplementedError("Equality is not implemented between merged/sub splits.")
+
+ def __ne__(self, other):
+ """InEquality: datasets.Split.TRAIN != 'test'."""
+ return not self.__eq__(other)
+
+ def __add__(self, other):
+ """Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
+ return _SplitMerged(self, other)
+
+ def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
+ """Divides this split into subsplits.
+
+ There are 3 ways to define subsplits, which correspond to the 3
+ arguments `k` (get `k` even subsplits), `percent` (get a slice of the
+ dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
+ specified by `weighted`).
+
+ Example::
+
+ ```
+ # 50% train, 50% test
+ train, test = split.subsplit(k=2)
+ # 50% train, 25% test, 25% validation
+ train, test, validation = split.subsplit(weighted=[2, 1, 1])
+ # Extract last 20%
+ subsplit = split.subsplit(datasets.percent[-20:])
+ ```
+
+ Warning: `k` and `weighted` will be converted into percent slices, which means that
+ the values are rounded to whole percents (up or down). The final split may be
+ bigger to deal with remainders. For instance:
+
+ ```
+ train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
+ s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
+ ```
+
+ Args:
+ arg: If no kwargs are given, `arg` will be interpreted as one of
+ `k`, `percent`, or `weighted` depending on the type.
+ For example:
+ ```
+ split.subsplit(10) # Equivalent to split.subsplit(k=10)
+ split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
+ split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
+ ```
+ k: `int` If set, subdivide the split into `k` equal parts.
+ percent: `datasets.percent slice`, return a single subsplit corresponding to
+ a slice of the original split. For example:
+ `split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
+ weighted: `list[int]`, return a list of subsplits whose proportions match
+ the normalized sum of the list. For example:
+ `split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
+
+ Returns:
+ A subsplit or list of subsplits extracted from this split object.
+ """
+ # Note that the percent kwargs redefine the outer name datasets.percent. This
+ # is done for consistency (.subsplit(percent=datasets.percent[:40]))
+ if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
+ raise ValueError("Only one argument of subsplit should be set.")
+
+ # Auto deduce k
+ if isinstance(arg, int):
+ k = arg
+ elif isinstance(arg, slice):
+ percent = arg
+ elif isinstance(arg, list):
+ weighted = arg
+
+ if not (k or percent or weighted):
+ raise ValueError(
+ f"Invalid split argument {arg}. Only list, slice and int supported. "
+ "One of k, weighted or percent should be set to a non empty value."
+ )
+
+ def assert_slices_coverage(slices):
+ # Ensure that the expanded slices cover all percents.
+ assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
+
+ if k:
+ if not 0 < k <= 100:
+ raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
+ shift = 100 // k
+ slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ elif percent:
+ return _SubSplit(self, percent)
+ elif weighted:
+ # Normalize the weighted sum
+ total = sum(weighted)
+ weighted = [100 * x // total for x in weighted]
+ # Create the slice for each of the elements
+ start = 0
+ stop = 0
+ slices = []
+ for v in weighted:
+ stop += v
+ slices.append(slice(start, stop))
+ start = stop
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ else:
+ # Should not be possible
+ raise ValueError("Could not determine the split")
+
+
+# 2 requirements:
+# 1. datasets.percent be sliceable
+# 2. datasets.percent be documented
+#
+# Instances are not documented, so we want datasets.percent to be a class, but to
+# have it be sliceable, we need this metaclass.
+class PercentSliceMeta(type):
+ def __getitem__(cls, slice_value):
+ if not isinstance(slice_value, slice):
+ raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
+ return slice_value
+
+
+class PercentSlice(metaclass=PercentSliceMeta):
+ # pylint: disable=line-too-long
+ """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+ """
+
+ # pylint: enable=line-too-long
+ pass
+
+
+percent = PercentSlice # pylint: disable=invalid-name
+
+
+class _SplitMerged(SplitBase):
+ """Represent two split descriptors merged together."""
+
+ def __init__(self, split1, split2):
+ self._split1 = split1
+ self._split2 = split2
+
+ def get_read_instruction(self, split_dict):
+ read_instruction1 = self._split1.get_read_instruction(split_dict)
+ read_instruction2 = self._split2.get_read_instruction(split_dict)
+ return read_instruction1 + read_instruction2
+
+ def __repr__(self):
+ return f"({repr(self._split1)} + {repr(self._split2)})"
+
+
+class _SubSplit(SplitBase):
+ """Represent a sub split of a split descriptor."""
+
+ def __init__(self, split, slice_value):
+ self._split = split
+ self._slice_value = slice_value
+
+ def get_read_instruction(self, split_dict):
+ return self._split.get_read_instruction(split_dict)[self._slice_value]
+
+ def __repr__(self):
+ slice_str = "{start}:{stop}"
+ if self._slice_value.step is not None:
+ slice_str += ":{step}"
+ slice_str = slice_str.format(
+ start="" if self._slice_value.start is None else self._slice_value.start,
+ stop="" if self._slice_value.stop is None else self._slice_value.stop,
+ step=self._slice_value.step,
+ )
+ return f"{repr(self._split)}(datasets.percent[{slice_str}])"
+
+
+class NamedSplit(SplitBase):
+ """Descriptor corresponding to a named split (train, test, ...).
+
+ Example:
+ Each descriptor can be composed with other using addition or slice:
+
+ ```py
+ split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
+ ```
+
+ The resulting split will correspond to 25% of the train split merged with
+ 100% of the test split.
+
+ A split cannot be added twice, so the following will fail:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TRAIN.subsplit(datasets.percent[75:])
+ ) # Error
+ split = datasets.Split.TEST + datasets.Split.ALL # Error
+ ```
+
+ The slices can be applied only one time. So the following are valid:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TEST.subsplit(datasets.percent[:50])
+ )
+ split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
+ ```
+
+ But this is not valid:
+
+ ```py
+ train = datasets.Split.TRAIN
+ test = datasets.Split.TEST
+ split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
+ split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
+ ```
+ """
+
+ def __init__(self, name):
+ self._name = name
+ split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
+ for split_name in split_names_from_instruction:
+ if not re.match(_split_re, split_name):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
+
+ def __str__(self):
+ return self._name
+
+ def __repr__(self):
+ return f"NamedSplit({self._name!r})"
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, NamedSplit):
+ return self._name == other._name # pylint: disable=protected-access
+ elif isinstance(other, SplitBase):
+ return False
+ elif isinstance(other, str): # Other should be string
+ return self._name == other
+ else:
+ raise ValueError(f"Equality not supported between split {self} and {other}")
+
+ def __lt__(self, other):
+ return self._name < other._name # pylint: disable=protected-access
+
+ def __hash__(self):
+ return hash(self._name)
+
+ def get_read_instruction(self, split_dict):
+ return SplitReadInstruction(split_dict[self._name])
+
+
+class NamedSplitAll(NamedSplit):
+ """Split corresponding to the union of all defined dataset splits."""
+
+ def __init__(self):
+ super().__init__("all")
+
+ def __repr__(self):
+ return "NamedSplitAll()"
+
+ def get_read_instruction(self, split_dict):
+ # Merge all dataset split together
+ read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
+ return sum(read_instructions, SplitReadInstruction())
+
+
+class Split:
+ # pylint: disable=line-too-long
+ """`Enum` for dataset splits.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ - `TRAIN`: the training data.
+ - `VALIDATION`: the validation data. If present, this is typically used as
+ evaluation data while iterating on a model (e.g. changing hyperparameters,
+ model architecture, etc.).
+ - `TEST`: the testing data. This is the data to report metrics on. Typically
+ you do not want to use this during model iteration as you may overfit to it.
+ - `ALL`: the union of all defined dataset splits.
+
+ All splits, including compositions, inherit from `datasets.SplitBase`.
+
+ See the [guide](../load_hub#splits) on splits for more information.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.VALIDATION,
+ ... gen_kwargs={"split_key": "validation", "files": dl_manager.download_and extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.TEST,
+ ... gen_kwargs={"split_key": "test", "files": dl_manager.download_and extract(url)},
+ ... )
+ ```
+ """
+
+ # pylint: enable=line-too-long
+ TRAIN = NamedSplit("train")
+ TEST = NamedSplit("test")
+ VALIDATION = NamedSplit("validation")
+ ALL = NamedSplitAll()
+
+ def __new__(cls, name):
+ """Create a custom split with datasets.Split('custom_name')."""
+ return NamedSplitAll() if name == "all" else NamedSplit(name)
+
+
+# Similar to SplitInfo, but contain an additional slice info
+SlicedSplitInfo = collections.namedtuple(
+ "SlicedSplitInfo",
+ [
+ "split_info",
+ "slice_value",
+ ],
+) # noqa: E231
+
+
+class SplitReadInstruction:
+ """Object containing the reading instruction for the dataset.
+
+ Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
+ but the resolution happens instantaneously, instead of keeping track of the
+ tree: all instructions are compiled and flattened into a single
+ SplitReadInstruction object containing the list of files and slices to use.
+
+ Once resolved, the instructions can be accessed with:
+
+ ```
+ read_instructions.get_list_sliced_split_info() # List of splits to use
+ ```
+
+ """
+
+ def __init__(self, split_info=None):
+ self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.")
+
+ if split_info:
+ self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
+
+ def add(self, sliced_split):
+ """Add a SlicedSplitInfo the read instructions."""
+ # TODO(epot): Check that the number of examples per shard % 100 == 0
+ # Otherwise the slices value may be unbalanced and not exactly reflect the
+ # requested slice.
+ self._splits[sliced_split.split_info.name] = sliced_split
+
+ def __add__(self, other):
+ """Merging split together."""
+ # Will raise an error if a split has already been added (NonMutableDict)
+ # TODO(epot): If a split is already added but there is no overlap between
+ # the slices, should merge the slices (ex: [:10] + [80:])
+ split_instruction = SplitReadInstruction()
+ split_instruction._splits.update(self._splits) # pylint: disable=protected-access
+ split_instruction._splits.update(other._splits) # pylint: disable=protected-access
+ return split_instruction
+
+ def __getitem__(self, slice_value):
+ """Sub-splits."""
+ # Will raise an error if a split has already been sliced
+ split_instruction = SplitReadInstruction()
+ for v in self._splits.values():
+ if v.slice_value is not None:
+ raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
+ v = v._asdict()
+ v["slice_value"] = slice_value
+ split_instruction.add(SlicedSplitInfo(**v))
+ return split_instruction
+
+ def get_list_sliced_split_info(self):
+ return list(self._splits.values())
+
+
+class SplitDict(dict):
+ """Split info object."""
+
+ def __init__(self, *args, dataset_name=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.dataset_name = dataset_name
+
+ def __getitem__(self, key: Union[SplitBase, str]):
+ # 1st case: The key exists: `info.splits['train']`
+ if str(key) in self:
+ return super().__getitem__(str(key))
+ # 2nd case: Uses instructions: `info.splits['train[50%]']`
+ else:
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=self.values(),
+ instruction=key,
+ )
+ return SubSplitInfo(instructions)
+
+ def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
+ if key != value.name:
+ raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
+ super().__setitem__(key, value)
+
+ def add(self, split_info: SplitInfo):
+ """Add the split info."""
+ if split_info.name in self:
+ raise ValueError(f"Split {split_info.name} already present")
+ split_info.dataset_name = self.dataset_name
+ super().__setitem__(split_info.name, split_info)
+
+ @property
+ def total_num_examples(self):
+ """Return the total number of examples."""
+ return sum(s.num_examples for s in self.values())
+
+ @classmethod
+ def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
+ """Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
+ if isinstance(split_infos, dict):
+ split_infos = list(split_infos.values())
+
+ if dataset_name is None:
+ dataset_name = split_infos[0].get("dataset_name") if split_infos else None
+
+ split_dict = cls(dataset_name=dataset_name)
+
+ for split_info in split_infos:
+ if isinstance(split_info, dict):
+ split_info = SplitInfo(**split_info)
+ split_dict.add(split_info)
+
+ return split_dict
+
+ def to_split_dict(self):
+ """Returns a list of SplitInfo protos that we have."""
+ out = []
+ for split_name, split_info in self.items():
+ split_info = copy.deepcopy(split_info)
+ split_info.name = split_name
+ out.append(split_info)
+ return out
+
+ def copy(self):
+ return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
+
+ def _to_yaml_list(self) -> list:
+ out = [asdict(s) for s in self.to_split_dict()]
+ # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
+ for split_info_dict in out:
+ split_info_dict.pop("shard_lengths", None)
+ # we don't need the dataset_name attribute that is deprecated
+ for split_info_dict in out:
+ split_info_dict.pop("dataset_name", None)
+ return out
+
+ @classmethod
+ def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
+ return cls.from_split_dict(yaml_data)
+
+
+@dataclass
+class SplitGenerator:
+ """Defines the split information for the generator.
+
+ This should be used as returned value of
+ `GeneratorBasedBuilder._split_generators`.
+ See `GeneratorBasedBuilder._split_generators` for more info and example
+ of usage.
+
+ Args:
+ name (`str`):
+ Name of the `Split` for which the generator will
+ create the examples.
+ **gen_kwargs (additional keyword arguments):
+ Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
+ of the builder.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
+ ... )
+ ```
+ """
+
+ name: str
+ gen_kwargs: Dict = dataclasses.field(default_factory=dict)
+ split_info: SplitInfo = dataclasses.field(init=False)
+
+ def __post_init__(self):
+ self.name = str(self.name) # Make sure we convert NamedSplits in strings
+ NamedSplit(self.name) # check that it's a valid split name
+ self.split_info = SplitInfo(name=self.name)
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Efate b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Efate
new file mode 100644
index 0000000000000000000000000000000000000000..777325fc6c6da8795d89aed5206c60f8bf80b0ab
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Efate differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Gambier b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Gambier
new file mode 100644
index 0000000000000000000000000000000000000000..84acaf41520d2d302f75e300e2b47c5527218df4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Gambier differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guam b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guam
new file mode 100644
index 0000000000000000000000000000000000000000..66490d25dff9bcc8f710b0141f1a02e64aeb32f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Guam differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Johnston b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Johnston
new file mode 100644
index 0000000000000000000000000000000000000000..c7cd060159bd22fc5e6f10ac5a2089afb2c19c6a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Johnston differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kwajalein b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kwajalein
new file mode 100644
index 0000000000000000000000000000000000000000..1887a607422edd499fdf24afe91a04294f1caf6f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Kwajalein differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Nauru b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Nauru
new file mode 100644
index 0000000000000000000000000000000000000000..3339b6cf86d6e98ba70c9bd6cab3dbf50588acd2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Nauru differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Noumea b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Noumea
new file mode 100644
index 0000000000000000000000000000000000000000..959cc8cd26f8a7b10a70e0f93bf3b3c9bbc680d2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Noumea differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pohnpei b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pohnpei
new file mode 100644
index 0000000000000000000000000000000000000000..1ab8353464ddb93947f871f07cfd12540373269c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Pohnpei differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Rarotonga b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Rarotonga
new file mode 100644
index 0000000000000000000000000000000000000000..184a87c112b99536acbafa1ec7aba52c7a94b549
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Rarotonga differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tahiti b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tahiti
new file mode 100644
index 0000000000000000000000000000000000000000..481edd30580f00eccf69de4f1c332fc048210011
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Tahiti differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Truk b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Truk
new file mode 100644
index 0000000000000000000000000000000000000000..7be2474dd91c8a7da181fcda09d838254b890d75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Truk differ
diff --git a/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wake b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wake
new file mode 100644
index 0000000000000000000000000000000000000000..47661d40a4188eb39e8d52e5af8ab23ef7f23766
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pytz/zoneinfo/Pacific/Wake differ