diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..39058314bdc320499e08a46d0246b54293f7365b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/__init__.py
@@ -0,0 +1,70 @@
+# ruff: noqa
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "2.18.0"
+
+from .arrow_dataset import Dataset
+from .arrow_reader import ReadInstruction
+from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
+from .combine import concatenate_datasets, interleave_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download import *
+from .features import *
+from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
+from .info import DatasetInfo, MetricInfo
+from .inspect import (
+ get_dataset_config_info,
+ get_dataset_config_names,
+ get_dataset_default_config_name,
+ get_dataset_infos,
+ get_dataset_split_names,
+ inspect_dataset,
+ inspect_metric,
+ list_datasets,
+ list_metrics,
+)
+from .iterable_dataset import IterableDataset
+from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
+from .metric import Metric
+from .splits import (
+ NamedSplit,
+ NamedSplitAll,
+ Split,
+ SplitBase,
+ SplitDict,
+ SplitGenerator,
+ SplitInfo,
+ SubSplitInfo,
+ percent,
+)
+from .tasks import *
+from .utils import *
+from .utils import logging
+
+
+# deprecated modules
+from datasets import arrow_dataset as _arrow_dataset # isort:skip
+from datasets import utils as _utils # isort:skip
+from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
+
+_arrow_dataset.concatenate_datasets = concatenate_datasets
+_utils.DownloadConfig = DownloadConfig
+_utils.DownloadManager = DownloadManager
+_utils.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadConfig = DownloadConfig
+_deprecated_download_manager.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadManager = DownloadManager
+
+del _arrow_dataset, _utils, _deprecated_download_manager
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/arrow_dataset.py b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..95126664561437aec7f066a75f4ca0470c5c0a17
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_dataset.py
@@ -0,0 +1,6277 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Simple Dataset wrapping an Arrow Table."""
+
+import contextlib
+import copy
+import fnmatch
+import itertools
+import json
+import math
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+import warnings
+import weakref
+from collections import Counter
+from collections.abc import Mapping
+from copy import deepcopy
+from functools import partial, wraps
+from io import BytesIO
+from math import ceil, floor
+from pathlib import Path
+from random import sample
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ overload,
+)
+from typing import Sequence as Sequence_
+
+import fsspec
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter, OptimizedTypedSequence
+from .data_files import sanitize_patterns
+from .download.streaming_download_manager import xgetsize
+from .features import Audio, ClassLabel, Features, Image, Sequence, Value
+from .features.features import (
+ FeatureType,
+ _align_features,
+ _check_if_features_can_be_aligned,
+ generate_from_arrow_type,
+ pandas_types_mapper,
+ require_decoding,
+)
+from .filesystems import is_remote_filesystem
+from .fingerprint import (
+ fingerprint_transform,
+ format_kwargs_for_fingerprint,
+ format_transform_for_fingerprint,
+ generate_fingerprint,
+ generate_random_fingerprint,
+ get_temporary_cache_files_directory,
+ is_caching_enabled,
+ maybe_register_dataset_for_temp_dir_deletion,
+ update_fingerprint,
+ validate_fingerprint,
+)
+from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table
+from .formatting.formatting import LazyDict, _is_range_contiguous
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .search import IndexableMixin
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import (
+ InMemoryTable,
+ MemoryMappedTable,
+ Table,
+ _memory_mapped_record_batch_reader_from_file,
+ cast_array_to_feature,
+ concat_tables,
+ embed_table_storage,
+ list_table_cache_files,
+ table_cast,
+ table_iter,
+ table_visitor,
+)
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import estimate_dataset_size
+from .utils.hub import list_files_info, preupload_lfs_files
+from .utils.info_utils import is_small_dataset
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import (
+ Literal,
+ asdict,
+ convert_file_size_to_int,
+ glob_pattern_to_regex,
+ iflatmap_unordered,
+ string_to_dict,
+ unique_values,
+)
+from .utils.stratify import stratified_shuffle_split_generate_indices
+from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf
+from .utils.typing import ListLike, PathLike
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import pyspark
+ import sqlalchemy
+
+ from .dataset_dict import DatasetDict
+ from .iterable_dataset import IterableDataset
+
+logger = logging.get_logger(__name__)
+
+PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = (
+ "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet"
+)
+
+
+class DatasetInfoMixin:
+ """This base class exposes some attributes of DatasetInfo
+ at the base level of the Dataset for easy access.
+ """
+
+ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
+ self._info = info
+ self._split = split
+
+ @property
+ def info(self):
+ """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
+ return self._info
+
+ @property
+ def split(self):
+ """[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
+ return self._split
+
+ @property
+ def builder_name(self) -> str:
+ return self._info.builder_name
+
+ @property
+ def citation(self) -> str:
+ return self._info.citation
+
+ @property
+ def config_name(self) -> str:
+ return self._info.config_name
+
+ @property
+ def dataset_size(self) -> Optional[int]:
+ return self._info.dataset_size
+
+ @property
+ def description(self) -> str:
+ return self._info.description
+
+ @property
+ def download_checksums(self) -> Optional[dict]:
+ return self._info.download_checksums
+
+ @property
+ def download_size(self) -> Optional[int]:
+ return self._info.download_size
+
+ @property
+ def features(self) -> Optional[Features]:
+ return self._info.features.copy() if self._info.features is not None else None
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._info.homepage
+
+ @property
+ def license(self) -> Optional[str]:
+ return self._info.license
+
+ @property
+ def size_in_bytes(self) -> Optional[int]:
+ return self._info.size_in_bytes
+
+ @property
+ def supervised_keys(self):
+ return self._info.supervised_keys
+
+ @property
+ def task_templates(self):
+ return self._info.task_templates
+
+ @property
+ def version(self):
+ return self._info.version
+
+
+class TensorflowDatasetMixin:
+ _TF_DATASET_REFS = set()
+
+ @staticmethod
+ def _get_output_signature(
+ dataset: "Dataset",
+ collate_fn: Callable,
+ collate_fn_args: dict,
+ cols_to_retain: Optional[List[str]] = None,
+ batch_size: Optional[int] = None,
+ num_test_batches: int = 20,
+ ):
+ """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
+ after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
+ the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
+ it out just by inspecting the dataset.
+
+ Args:
+ dataset (`Dataset`): Dataset to load samples from.
+ collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
+ Can be None, which indicates that batch sizes can be variable.
+ num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
+
+ Returns:
+ `dict`: Dict mapping column names to tf.TensorSpec objects
+ `dict`: Dict mapping column names to np.dtype objects
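+
+ Example (an illustrative sketch, not part of the public API; assumes `ds` is an existing `Dataset` with scalar columns and uses a toy collator defined here):
+
+ ```py
+ >>> import numpy as np
+ >>> def collate(batch):
+ ... # stack each column of the list of examples into a numpy array
+ ... return {k: np.array([example[k] for example in batch]) for k in batch[0]}
+ >>> signatures, dtypes = Dataset._get_output_signature(
+ ... ds, collate_fn=collate, collate_fn_args={}, batch_size=8
+ ... )
+ ```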
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if len(dataset) == 0:
+ raise ValueError("Unable to get the output signature because the dataset is empty.")
+ if batch_size is not None:
+ batch_size = min(len(dataset), batch_size)
+ test_batch_size = 1
+
+ if cols_to_retain is not None:
+ cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
+
+ test_batches = []
+ for _ in range(num_test_batches):
+ indices = sample(range(len(dataset)), test_batch_size)
+ test_batch = dataset[indices]
+ if cols_to_retain is not None:
+ test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
+ test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
+ test_batch = collate_fn(test_batch, **collate_fn_args)
+ test_batches.append(test_batch)
+
+ tf_columns_to_signatures = {}
+ np_columns_to_dtypes = {}
+ for column in test_batches[0].keys():
+ raw_arrays = [batch[column] for batch in test_batches]
+ # In case the collate_fn returns something strange
+ np_arrays = []
+ for array in raw_arrays:
+ if isinstance(array, np.ndarray):
+ np_arrays.append(array)
+ elif isinstance(array, tf.Tensor):
+ np_arrays.append(array.numpy())
+ else:
+ np_arrays.append(np.array(array))
+
+ if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
+ tf_dtype = tf.int64
+ np_dtype = np.int64
+ elif np.issubdtype(np_arrays[0].dtype, np.number):
+ tf_dtype = tf.float32
+ np_dtype = np.float32
+ elif np_arrays[0].dtype.kind == "U": # Unicode strings
+ np_dtype = np.unicode_
+ tf_dtype = tf.string
+ else:
+ raise RuntimeError(
+ f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
+ "Nested types and image/audio types are not supported yet."
+ )
+ shapes = [array.shape for array in np_arrays]
+ static_shape = []
+ for dim in range(len(shapes[0])):
+ sizes = {shape[dim] for shape in shapes}
+ if dim == 0:
+ static_shape.append(batch_size)
+ continue
+ if len(sizes) == 1: # This dimension looks constant
+ static_shape.append(sizes.pop())
+ else: # Use None for variable dimensions
+ static_shape.append(None)
+ tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
+ np_columns_to_dtypes[column] = np_dtype
+
+ return tf_columns_to_signatures, np_columns_to_dtypes
+
+ def to_tf_dataset(
+ self,
+ batch_size: Optional[int] = None,
+ columns: Optional[Union[str, List[str]]] = None,
+ shuffle: bool = False,
+ collate_fn: Optional[Callable] = None,
+ drop_remainder: bool = False,
+ collate_fn_args: Optional[Dict[str, Any]] = None,
+ label_cols: Optional[Union[str, List[str]]] = None,
+ prefetch: bool = True,
+ num_workers: int = 0,
+ num_test_batches: int = 20,
+ ):
+ """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
+ the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
+ `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
+ `tf.Tensor` is yielded instead.
+
+ Args:
+ batch_size (`int`, *optional*):
+ Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
+ batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ columns (`List[str]` or `str`, *optional*):
+ Dataset column(s) to load in the `tf.data.Dataset`.
+ Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
+ shuffle(`bool`, defaults to `False`):
+ Shuffle the dataset order when loading. Recommended `True` for training, `False` for
+ validation/evaluation.
+ drop_remainder(`bool`, defaults to `False`):
+ Drop the last incomplete batch when loading. Ensures
+ that all batches yielded by the dataset will have the same length on the batch dimension.
+ collate_fn(`Callable`, *optional*):
+ A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`, *optional*):
+ An optional `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ label_cols (`List[str]` or `str`, defaults to `None`):
+ Dataset column(s) to load as labels.
+ Note that many models compute loss internally rather than letting Keras do it, in which case
+ passing the labels here is optional, as long as they're in the input `columns`.
+ prefetch (`bool`, defaults to `True`):
+ Whether to run the dataloader in a separate thread and maintain
+ a small buffer of batches for training. Improves performance by allowing data to be loaded in the
+ background while the model is training.
+ num_workers (`int`, defaults to `0`):
+ Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8.
+ num_test_batches (`int`, defaults to `20`):
+ Number of batches to use to infer the output signature of the dataset.
+ The higher this number, the more accurate the signature will be, but the longer it will take to
+ create the dataset.
+
+ Returns:
+ `tf.data.Dataset`
+
+ Example:
+
+ ```py
+ >>> ds_train = ds["train"].to_tf_dataset(
+ ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ ... shuffle=True,
+ ... batch_size=16,
+ ... collate_fn=data_collator,
+ ... )
+ ```
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if (isinstance(columns, list) and len(columns) == 1) or (
+ isinstance(label_cols, list) and len(label_cols) == 1
+ ):
+ warnings.warn(
+ "The output of `to_tf_dataset` will change when a passing single element list for `labels` or "
+ "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
+ "single string.\n"
+ "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
+ "New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
+ FutureWarning,
+ )
+
+ if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
+ logger.warning(
+ "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
+ "pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
+ "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
+ "Tensors instead of streaming with to_tf_dataset()."
+ )
+
+ if collate_fn is None:
+ # Set a very simple default collator that just stacks things together
+ collate_fn = minimal_tf_collate_fn
+ if collate_fn_args is None:
+ collate_fn_args = {}
+ if label_cols and not columns:
+ raise ValueError("Cannot specify label_cols without specifying columns!")
+ if label_cols is None:
+ label_cols = []
+ elif isinstance(label_cols, str):
+ label_cols = [label_cols]
+ if len(set(label_cols)) < len(label_cols):
+ raise ValueError("List of label_cols contains duplicates.")
+ if columns:
+ if isinstance(columns, str):
+ columns = [columns]
+ if len(set(columns)) < len(columns):
+ raise ValueError("List of columns contains duplicates.")
+ cols_to_retain = list(set(columns + label_cols))
+ else:
+ cols_to_retain = None # Indicates keeping all valid columns
+ columns = []
+
+ if self.format["type"] not in ["custom", "numpy"]:
+ dataset = self.with_format("numpy")
+ else:
+ dataset = self
+
+ # TODO(Matt, QL): deprecate the retention of label_ids and label
+
+ output_signature, columns_to_np_types = dataset._get_output_signature(
+ dataset,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ cols_to_retain=cols_to_retain,
+ batch_size=batch_size if drop_remainder else None,
+ num_test_batches=num_test_batches,
+ )
+
+ if "labels" in output_signature:
+ if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
+ columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
+ if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
+ label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
+
+ for col in columns:
+ if col not in output_signature:
+ raise ValueError(f"Column {col} not found in dataset!")
+
+ for col in label_cols:
+ if col not in output_signature:
+ raise ValueError(f"Label column {col} not found in dataset!")
+
+ if num_workers == 0:
+ tf_dataset = dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ )
+ elif num_workers > 0:
+ if batch_size is None:
+ raise NotImplementedError(
+ "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
+ "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
+ )
+ tf_dataset = multiprocess_dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+ else:
+ raise ValueError("num_workers must be >= 0")
+
+ def split_features_and_labels(input_batch):
+ # TODO(Matt, QL): deprecate returning the dict content when there's only one key
+ features = {key: tensor for key, tensor in input_batch.items() if key in columns}
+ labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
+ if len(features) == 1:
+ features = list(features.values())[0]
+ if len(labels) == 1:
+ labels = list(labels.values())[0]
+ if isinstance(labels, dict) and len(labels) == 0:
+ return features
+ else:
+ return features, labels
+
+ if cols_to_retain is not None:
+ tf_dataset = tf_dataset.map(split_features_and_labels)
+
+ if prefetch:
+ tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
+
+ # Remove a reference to the open Arrow file on delete
+ def cleanup_callback(ref):
+ dataset.__del__()
+ self._TF_DATASET_REFS.remove(ref)
+
+ self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
+
+ return tf_dataset
+
+
+class DatasetTransformationNotAllowedError(Exception):
+ pass
+
+
+def transmit_format(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None
+ unformatted_columns = set(self.column_names) - set(self._format_columns or [])
+ self_format = {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ # re-apply format to the output
+ for dataset in datasets:
+ new_format = self_format.copy()
+ if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
+ # sort the columns to have a deterministic list of columns that we can compare with `out_format`
+ new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
+ out_format = {
+ "type": dataset._format_type,
+ "format_kwargs": dataset._format_kwargs,
+ "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
+ "output_all_columns": dataset._output_all_columns,
+ }
+ if out_format != new_format:
+ fingerprint = dataset._fingerprint
+ dataset.set_format(**new_format)
+ dataset._fingerprint = fingerprint
+ return out
+
+ wrapper._decorator_name_ = "transmit_format"
+ return wrapper
+
+
+def transmit_tasks(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ for dataset in datasets:
+ # Remove task templates if a column mapping of the template is no longer valid
+ if self.info.task_templates is not None:
+ dataset.info.task_templates = [
+ template
+ for template in self.info.task_templates
+ if all(
+ dataset._info.features.get(k) == self._info.features.get(k)
+ for k in template.column_mapping.keys()
+ )
+ ]
+ return out
+
+ wrapper._decorator_name_ = "transmit_tasks"
+ return wrapper
+
+
+def update_metadata_with_features(table: Table, features: Features):
+ """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema."""
+ features = Features({col_name: features[col_name] for col_name in table.column_names})
+ if table.schema.metadata is None or b"huggingface" not in table.schema.metadata:
+ pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))
+ else:
+ metadata = json.loads(table.schema.metadata[b"huggingface"].decode())
+ if "info" not in metadata:
+ metadata["info"] = asdict(DatasetInfo(features=features))
+ else:
+ metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"]
+ pa_metadata = {"huggingface": json.dumps(metadata)}
+ table = table.replace_schema_metadata(pa_metadata)
+ return table
+
+
+def _check_table(table) -> Table:
+ """We check the table type to make sure it's an instance of :class:`datasets.table.Table`"""
+ if isinstance(table, pa.Table):
+ # for a pyarrow table, we can just consider it as a in-memory table
+ # this is here for backward compatibility
+ return InMemoryTable(table)
+ elif isinstance(table, Table):
+ return table
+ else:
+ raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.")
+
+
+def _check_valid_indices_value(index, size):
+ if (index < 0 and index + size < 0) or (index >= size):
+ raise IndexError(f"Index {index} out of range for dataset of size {size}.")
+
+
+class NonExistentDatasetError(Exception):
+ """Used when we expect the existence of a dataset"""
+
+ pass
+
+
+class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
+ """A Dataset backed by an Arrow table."""
+
+ def __init__(
+ self,
+ arrow_table: Table,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_table: Optional[Table] = None,
+ fingerprint: Optional[str] = None,
+ ):
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+ IndexableMixin.__init__(self)
+
+ self._data: Table = _check_table(arrow_table)
+ self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
+ maybe_register_dataset_for_temp_dir_deletion(self)
+
+ self._format_type: Optional[str] = None
+ self._format_kwargs: dict = {}
+ self._format_columns: Optional[list] = None
+ self._output_all_columns: bool = False
+ self._fingerprint: str = fingerprint
+
+ # Read metadata
+
+ if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
+ metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
+ if (
+ "fingerprint" in metadata and self._fingerprint is None
+ ): # try to load fingerprint from the arrow file metadata
+ self._fingerprint = metadata["fingerprint"]
+
+ # Infer features if None
+ inferred_features = Features.from_arrow_schema(arrow_table.schema)
+ if self.info.features is None:
+ self.info.features = inferred_features
+ else: # make sure the nested columns are in the right order
+ try:
+ self.info.features = self.info.features.reorder_fields_as(inferred_features)
+ except ValueError as e:
+ raise ValueError(
+ f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
+ )
+
+ # Infer fingerprint if None
+
+ if self._fingerprint is None:
+ self._fingerprint = generate_fingerprint(self)
+
+ # Sanity checks
+
+ if self._info.features is None:
+ raise ValueError("Features can't be None in a Dataset object")
+ if self._fingerprint is None:
+ raise ValueError("Fingerprint can't be None in a Dataset object")
+ if self.info.features.type != inferred_features.type:
+ raise ValueError(
+ f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
+ )
+
+ if self._indices is not None:
+ if not pa.types.is_unsigned_integer(self._indices.column(0).type):
+ raise ValueError(
+ f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
+ )
+ _check_column_names(self._data.column_names)
+
+ self._data = update_metadata_with_features(self._data, self._info.features)
+
+ @property
+ def features(self) -> Features:
+ features = super().features
+ if features is None: # this is already checked in __init__
+ raise ValueError("Features can't be None in a Dataset object")
+ return features
+
+ @classmethod
+ def from_file(
+ cls,
+ filename: str,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_filename: Optional[str] = None,
+ in_memory: bool = False,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_filename (`str`, *optional*):
+ File names of the indices.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ [`Dataset`]
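+
+ Example (an illustrative sketch; the Arrow file path is a placeholder):
+
+ ```py
+ >>> ds = Dataset.from_file("path/to/dataset.arrow")
+ ```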
+ """
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+
+ if indices_filename is not None:
+ indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
+ else:
+ indices_pa_table = None
+
+ return cls(
+ arrow_table=table,
+ info=info,
+ split=split,
+ indices_table=indices_pa_table,
+ )
+
+ @classmethod
+ def from_buffer(
+ cls,
+ buffer: pa.Buffer,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow buffer.
+
+ Args:
+ buffer (`pyarrow.Buffer`):
+ Arrow buffer.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_buffer (`pyarrow.Buffer`, *optional*):
+ Indices Arrow buffer.
+
+ Returns:
+ [`Dataset`]
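+
+ Example (an illustrative sketch, assuming the buffer holds an Arrow table serialized in the streaming IPC format):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> table = pa.table({"text": ["hello", "world"]})
+ >>> sink = pa.BufferOutputStream()
+ >>> with pa.ipc.new_stream(sink, table.schema) as writer:
+ ... writer.write_table(table)
+ >>> ds = Dataset.from_buffer(sink.getvalue())
+ ```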
+ """
+ table = InMemoryTable.from_buffer(buffer)
+
+ if indices_buffer is not None:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+ else:
+ indices_table = None
+
+ return cls(table, info=info, split=split, indices_table=indices_table)
+
+ @classmethod
+ def from_pandas(
+ cls,
+ df: pd.DataFrame,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ preserve_index: Optional[bool] = None,
+ ) -> "Dataset":
+ """
+ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
+
+ The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
+ DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
+ type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
+ contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
+ features and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ Dataframe that contains the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting Dataset.
+ The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
+ Use `preserve_index=True` to force it to be stored as a column.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_pandas(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable.from_pandas(
+ df=df,
+ preserve_index=preserve_index,
+ )
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_dict(
+ cls,
+ mapping: dict,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
+
+ Args:
+ mapping (`Mapping`):
+ Mapping of strings to Arrays or Python lists.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
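+
+ Example (an illustrative sketch with toy data):
+
+ ```py
+ >>> ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
+ ```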
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ arrow_typed_mapping = {}
+ for col, data in mapping.items():
+ if isinstance(data, (pa.Array, pa.ChunkedArray)):
+ data = cast_array_to_feature(data, features[col]) if features is not None else data
+ else:
+ data = OptimizedTypedSequence(
+ features.encode_column(data, col) if features is not None else data,
+ type=features[col] if features is not None else None,
+ col=col,
+ )
+ arrow_typed_mapping[col] = data
+ mapping = arrow_typed_mapping
+ pa_table = InMemoryTable.from_pydict(mapping=mapping)
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ if info.features is None:
+ info.features = Features(
+ {
+ col: generate_from_arrow_type(data.type)
+ if isinstance(data, (pa.Array, pa.ChunkedArray))
+ else data.get_inferred_type()
+ for col, data in mapping.items()
+ }
+ )
+ return cls(pa_table, info=info, split=split)
+
+ @classmethod
+ def from_list(
+ cls,
+ mapping: List[dict],
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
+
+ Note that the keys of the first entry will be used to determine the dataset columns,
+ regardless of what is passed to features.
+
+ Args:
+ mapping (`List[dict]`): A list of mappings of strings to row values.
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
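+
+ Example (an illustrative sketch with toy data):
+
+ ```py
+ >>> ds = Dataset.from_list([{"text": "hello", "label": 0}, {"text": "world", "label": 1}])
+ ```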
+ """
+ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
+ mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
+ return cls.from_dict(mapping, features, info, split)
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from CSV file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the CSV file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_csv('path/to/dataset.csv')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create a Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`GeneratorConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = Dataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ gen_kwargs=gen_kwargs,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from JSON or JSON Lines file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the JSON or JSON Lines file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ field (`str`, *optional*):
+ Field name of the JSON file where the dataset is contained in.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_json('path/to/dataset.json')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ field=field,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from Parquet file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the Parquet file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_parquet('path/to/dataset.parquet')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from text file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the text file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_text('path/to/dataset.txt')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ keep_in_memory: bool = False,
+ cache_dir: str = None,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ **kwargs,
+ ):
+ """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
+ workers and the driver.
+ keep_in_memory (`bool`):
+ Whether to copy the data in-memory.
+ working_dir (`str`, *optional*):
+ Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
+ a non-NFS intermediate directory may improve performance.
+ load_from_cache_file (`bool`):
+ Whether to load the dataset from the cache if possible.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ ... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ ... schema=["id", "name"],
+ ... )
+ >>> ds = Dataset.from_spark(df)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=False,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ working_dir=working_dir,
+ load_from_cache_file=load_from_cache_file,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_sql(
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ """Create Dataset from SQL query or database table.
+
+ Args:
+ sql (`str` or `sqlalchemy.sql.Selectable`):
+ SQL query to be executed or a table name.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`SqlConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> # Fetch a database table
+ >>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
+ >>> # Execute a SQL query on the table
+ >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
+ >>> # Use a Selectable object to specify the query
+ >>> from sqlalchemy import select, text
+ >>> stmt = select([text("sentence")]).select_from(text("test_data"))
+ >>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
+ ```
+
+
+
+ The returned dataset can only be cached if `con` is specified as a URI string.
+
+
+ """
+ from .io.sql import SqlDatasetReader
+
+ return SqlDatasetReader(
+ sql,
+ con,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ **kwargs,
+ ).read()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ maybe_register_dataset_for_temp_dir_deletion(self)
+ return self
+
+ def __del__(self):
+ if hasattr(self, "_data"):
+ del self._data
+ if hasattr(self, "_indices"):
+ del self._indices
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ self.__del__()
+
+ def save_to_disk(
+ self,
+ dataset_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
+ of the dataset directory where the dataset will be saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+
+
+ num_proc (`int`, *optional*):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Example:
+
+ ```py
+ >>> ds.save_to_disk("path/to/dataset/directory")
+ >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
+ ```
+ """
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ if self.list_indexes():
+ raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
+
+ if num_shards is None:
+ dataset_nbytes = self._estimate_nbytes()
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, num_proc or 1)
+
+ num_proc = num_proc if num_proc is not None else 1
+ num_shards = num_shards if num_shards is not None else num_proc
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+
+ if not is_remote_filesystem(fs):
+ parent_cache_files_paths = {
+ Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
+ }
+ # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
+ if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
+ raise PermissionError(
+ f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
+ )
+
+ fs.makedirs(dataset_path, exist_ok=True)
+
+ # Get json serializable state
+ state = {
+ key: self.__dict__[key]
+ for key in [
+ "_fingerprint",
+ "_format_columns",
+ "_format_kwargs",
+ "_format_type",
+ "_output_all_columns",
+ ]
+ }
+ state["_split"] = str(self.split) if self.split is not None else self.split
+ state["_data_files"] = [
+ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
+ ]
+ for k in state["_format_kwargs"].keys():
+ try:
+ json.dumps(state["_format_kwargs"][k])
+ except TypeError as e:
+ raise TypeError(
+ str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
+ ) from None
+ # Get json serializable dataset info
+ dataset_info = asdict(self._info)
+
+ shards_done = 0
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=len(self),
+ desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
+ )
+ kwargs_per_job = (
+ {
+ "job_id": shard_idx,
+ "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
+ "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
+ "storage_options": storage_options,
+ }
+ for shard_idx in range(num_shards)
+ )
+ shard_lengths = [None] * num_shards
+ shard_sizes = [None] * num_shards
+ if num_proc > 1:
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ else:
+ with pbar:
+ for kwargs in kwargs_per_job:
+ for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8"
+ ) as state_file:
+ json.dump(state, state_file, indent=2, sort_keys=True)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
+ ) as dataset_info_file:
+ # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
+ sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
+ json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
+
+ @staticmethod
+ def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
+ batch_size = config.DEFAULT_MAX_BATCH_SIZE
+
+ num_examples_progress_update = 0
+ writer = ArrowWriter(
+ features=shard.features,
+ path=fpath,
+ storage_options=storage_options,
+ embed_local_files=True,
+ )
+ try:
+ _time = time.time()
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ writer.write_table(pa_table)
+ num_examples_progress_update += len(pa_table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+
+ yield job_id, True, (num_examples, num_bytes)
+
+ @staticmethod
+ def _build_local_temp_path(uri_or_path: str) -> Path:
+ """
+ Builds and returns a Path that concatenates a local temporary directory with the directory path (or the
+ absolute/relative path extracted from the URI) passed in.
+
+ Args:
+ uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) to concatenate.
+
+ Returns:
+ :class:`Path`: the concatenated path (temp dir + path)
+ """
+ src_dataset_path = Path(uri_or_path)
+ tmp_dir = get_temporary_cache_files_directory()
+ return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
+
+ @staticmethod
+ def load_from_disk(
+ dataset_path: str,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "Dataset":
+ """
+ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
+ filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3//my-bucket/dataset/train"`)
+ of the dataset directory where the dataset will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory, the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk("path/to/dataset/directory")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, [dataset_path] = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+
+ dest_dataset_path = dataset_path
+ dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
+ dataset_info_is_file = fs.isfile(dataset_info_path)
+ dataset_state_is_file = fs.isfile(dataset_state_json_path)
+ if not dataset_info_is_file and not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+ if not dataset_info_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+ if not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+
+ # copies file from filesystem if it is remote filesystem to local filesystem and modifies dataset_path to temp directory containing local copies
+ if is_remote_filesystem(fs):
+ src_dataset_path = dest_dataset_path
+ dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
+ fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ with open(dataset_state_json_path, encoding="utf-8") as state_file:
+ state = json.load(state_file)
+ with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
+ dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
+
+ dataset_size = estimate_dataset_size(
+ Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
+ )
+ keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
+ table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
+
+ arrow_table = concat_tables(
+ thread_map(
+ table_cls.from_file,
+ [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]],
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset from disk",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(state["_data_files"]) <= 16 or None,
+ )
+ )
+
+ split = state["_split"]
+ split = Split(split) if split is not None else split
+
+ dataset = Dataset(
+ arrow_table=arrow_table,
+ info=dataset_info,
+ split=split,
+ fingerprint=state["_fingerprint"],
+ )
+
+ format = {
+ "type": state["_format_type"],
+ "format_kwargs": state["_format_kwargs"],
+ "columns": state["_format_columns"],
+ "output_all_columns": state["_output_all_columns"],
+ }
+ dataset = dataset.with_format(**format)
+
+ return dataset
+
+ @property
+ def data(self) -> Table:
+ """The Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.data
+ MemoryMappedTable
+ text: string
+ label: int64
+ ----
+ text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
+ label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
+ ```
+ """
+ return self._data
+
+ @property
+ def cache_files(self) -> List[dict]:
+ """The cache files containing the Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cache_files
+ [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
+ ```
+ """
+ cache_files = list_table_cache_files(self._data)
+ if self._indices is not None:
+ cache_files += list_table_cache_files(self._indices)
+ return [{"filename": cache_filename} for cache_filename in cache_files]
+
+ @property
+ def num_columns(self) -> int:
+ """Number of columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_columns
+ 2
+ ```
+ """
+ return self._data.num_columns
+
+ @property
+ def num_rows(self) -> int:
+ """Number of rows in the dataset (same as [`Dataset.__len__`]).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_rows
+ 1066
+ ```
+ """
+ if self._indices is not None:
+ return self._indices.num_rows
+ return self._data.num_rows
+
+ @property
+ def column_names(self) -> List[str]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return self._data.column_names
+
+ @property
+ def shape(self) -> Tuple[int, int]:
+ """Shape of the dataset (number of columns, number of rows).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.shape
+ (1066, 2)
+ ```
+ """
+ if self._indices is not None:
+ return (self._indices.num_rows, self._data.num_columns)
+ return self._data.shape
+
+ def unique(self, column: str) -> List:
+ """Return a list of the unique elements in a column.
+
+ This is implemented in the low-level backend and as such, very fast.
+
+ Args:
+ column (`str`):
+ Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+
+ Returns:
+ `list`: List of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.unique('label')
+ [1, 0]
+ ```
+ """
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+
+ if self._indices is not None and self._indices.num_rows != self._data.num_rows:
+ dataset = self.flatten_indices()
+ else:
+ dataset = self
+
+ return dataset._data.column(column).unique().to_pylist()
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
+
+ Args:
+ column (`str`):
+ The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq", split="validation")
+ >>> ds.features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column('answer')
+ >>> ds.features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ # Sanity checks
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ src_feat = self._info.features[column]
+ if not isinstance(src_feat, Value):
+ raise ValueError(
+ f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
+ )
+
+ if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
+
+ def stringify_column(batch):
+ batch[column] = [
+ str(sample) if include_nulls or sample is not None else None for sample in batch[column]
+ ]
+ return batch
+
+ dset = self.map(
+ stringify_column,
+ batched=True,
+ desc="Stringifying the column",
+ )
+ else:
+ dset = self
+
+ # Create the new feature
+ class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
+ dst_feat = ClassLabel(names=class_names)
+
+ def cast_to_class_labels(batch):
+ batch[column] = [
+ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
+ for sample in batch[column]
+ ]
+ return batch
+
+ new_features = dset.features.copy()
+ new_features[column] = dst_feat
+
+ dset = dset.map(
+ cast_to_class_labels,
+ batched=True,
+ features=new_features,
+ desc="Casting to class labels",
+ )
+
+ return dset
+
+ @fingerprint_transform(inplace=False)
+ def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
+ """Flatten the table.
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Args:
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ max_depth (`int`, defaults to `16`):
+ Maximum number of nested struct levels to flatten.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with flattened columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
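+ # Flatten one level of struct columns per pass; stop as soon as no struct columns remain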
+ for depth in range(1, max_depth):
+ if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
+ dataset._data = dataset._data.flatten()
+ else:
+ break
+ dataset.info.features = self._info.features.flatten(max_depth=max_depth)
+ dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.')
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def cast(
+ self,
+ features: Features,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ num_proc: Optional[int] = None,
+ ) -> "Dataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset.
+ batch_size (`int`, defaults to `1000`):
+ Number of examples per batch provided to cast.
+ If `batch_size <= 0` or `batch_size == None`, then the full dataset is provided as a single batch to `cast`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`].
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, ClassLabel, Value
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ if sorted(features) != sorted(self._data.column_names):
+ raise ValueError(
+ f"The columns in features ({list(features)}) must be identical "
+ f"as the columns in the dataset: {self._data.column_names}"
+ )
+
+ schema = features.arrow_schema
+ format = self.format
+ dataset = self.with_format("arrow")
+ # capture the PyArrow version here to make the lambda serializable on Windows
+ dataset = dataset.map(
+ partial(table_cast, schema=schema),
+ batched=True,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ num_proc=num_proc,
+ features=features,
+ desc="Casting the dataset",
+ )
+ dataset = dataset.with_format(**format)
+ return dataset
+
+ @fingerprint_transform(inplace=False)
+ def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`FeatureType`):
+ Target feature.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ if hasattr(feature, "decode_example"):
+ dataset = copy.deepcopy(self)
+ dataset._info.features[column] = feature
+ dataset._fingerprint = new_fingerprint
+ dataset._data = dataset._data.cast(dataset.features.arrow_schema)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ return dataset
+ else:
+ features = self.features
+ features[column] = feature
+ return self.cast(features)
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+
+ You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns`, but the present method
+ doesn't copy the data of the remaining columns and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.remove_columns('label')
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
+ Dataset({
+ features: [],
+ num_rows: 0
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ for column_name in column_names:
+ del dataset._info.features[column_name]
+
+ dataset._data = dataset._data.drop(column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_column(
+ self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
+ ) -> "Dataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_column('label', 'label_new')
+ Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if original_column_name not in dataset._data.column_names:
+ raise ValueError(
+ f"Original column name {original_column_name} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if new_column_name in dataset._data.column_names:
+ raise ValueError(
+ f"New column name {new_column_name} already in the dataset. "
+ f"Please choose a column name which is not already in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if not new_column_name:
+ raise ValueError("New column name is empty.")
+
+ def rename(columns):
+ return [new_column_name if col == original_column_name else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ new_column_name if col == original_column_name else col: feature
+ for col, feature in self._info.features.items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+
+ extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
+ if extra_columns:
+ raise ValueError(
+ f"Original column names {extra_columns} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
+ if number_of_duplicates_in_new_columns != 0:
+ raise ValueError(
+ "New column names must all be different, but this column mapping "
+ f"has {number_of_duplicates_in_new_columns} duplicates"
+ )
+
+ empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
+ if empty_new_columns:
+ raise ValueError(f"New column names {empty_new_columns} are empty.")
+
+ def rename(columns):
+ return [column_mapping[col] if col in column_mapping else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping else col: feature
+ for col, feature in (self._info.features or {}).items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform. If `None`,
+ the new fingerprint is computed using a hash of the previous
+ fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object which only consists of
+ selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select_columns(['text'])
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Current columns in the dataset: "
+ f"{self._data.column_names}."
+ )
+
+ dataset = copy.deepcopy(self)
+ dataset._data = dataset._data.select(column_names)
+ dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def __len__(self):
+ """Number of rows in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.__len__()
+ 1066
+ ```
+ """
+ return self.num_rows
+
+ def __iter__(self):
+ """Iterate through the examples.
+
+ If a formatting is set with [`~datasets.Dataset.set_format`], rows will be returned with the
+ selected format.
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
+ for pa_subtable in table_iter(self.data, batch_size=batch_size):
+ for i in range(pa_subtable.num_rows):
+ pa_subtable_ex = pa_subtable.slice(i, 1)
+ formatted_output = format_table(
+ pa_subtable_ex,
+ 0,
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_output
+ else:
+ for i in range(self.num_rows):
+ yield self._getitem(
+ i,
+ )
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ If a formatting is set with [`~datasets.Dataset.set_format`], rows will be returned with the
+ selected format.
+
+ Args:
+ batch_size (`int`):
+ Size of each batch to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be dropped.
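+
+ Example (a minimal sketch, using the same `rotten_tomatoes` validation split as the other examples):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> batch = next(ds.iter(batch_size=8))
+ >>> len(batch["text"])
+ 8
+ ```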
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
+ formatted_batch = format_table(
+ pa_subtable,
+ range(pa_subtable.num_rows),
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_batch
+ else:
+ num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
+ for i in range(0, num_rows, batch_size):
+ yield self._getitem(
+ slice(i, i + batch_size),
+ )
+
+ def __repr__(self):
+ return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
+
+ @property
+ def format(self):
+ return {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self.column_names if self._format_columns is None else self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
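+
+ Example (a minimal sketch, reusing the `rotten_tomatoes` validation split from the other examples):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> with ds.formatted_as(type="numpy", columns=["label"]):
+ ...     print(type(ds["label"]))
+ <class 'numpy.ndarray'>
+ >>> type(ds["label"])  # the previous format is restored on exit
+ <class 'list'>
+ ```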
+ """
+ old_format_type = self._format_type
+ old_format_kwargs = self._format_kwargs
+ old_format_columns = self._format_columns
+ old_output_all_columns = self._output_all_columns
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
+
+ @fingerprint_transform(inplace=True)
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+ gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted according to:
+
+ ```
+ new formatted columns = (all columns - previously unformatted columns)
+ ```
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['text', 'label'])
+ >>> ds.format
+ {'type': 'numpy',
+ 'format_kwargs': {},
+ 'columns': ['text', 'label'],
+ 'output_all_columns': False}
+ ```
+ """
+ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format)
+
+ # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
+ type = get_format_type_from_alias(type)
+ get_formatter(type, features=self._info.features, **format_kwargs)
+
+ # Check filter column
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, tuple):
+ columns = list(columns)
+ if columns is not None:
+ missing_columns = set(columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+ if columns is not None:
+ columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
+
+ self._format_type = type
+ self._format_kwargs = format_kwargs
+ self._format_columns = columns
+ self._output_all_columns = output_all_columns
+ logger.debug(
+ "Set __getitem__(key) output type to %s for %s columns "
+ " (when key is int or slice) and %s output other (un-formatted) columns.",
+ "python objects" if type is None else type,
+ "no" if columns is None else str(columns),
+ "do" if output_all_columns else "don't",
+ )
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ As with [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
+ >>> def encode(batch):
+ ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
+ >>> ds.set_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1]),
+ 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
+ 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
+ 5637, 1998, 11690, 2336, 1012, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0])}
+ ```
+ """
+ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+
+ As with [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
+
+ Args:
+ transform (`Callable`, `optional`):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, `optional`):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
+ >>> ds = ds.with_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
+ 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
+ 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ @deprecated()
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset":
+ """
+ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates).
+
+ Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting.
+
+ Args:
+ task (`Union[str, TaskTemplate]`):
+ The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include:
+
+ - `"text-classification"`
+ - `"question-answering"`
+
+ If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates).
+ id (`int`, defaults to `0`):
+ The id required to unambiguously identify the task template when multiple task templates of the same type are supported.
+ """
+ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD
+ if isinstance(task, str):
+ tasks = [template.task for template in (self.info.task_templates or [])]
+ compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]
+ if not compatible_templates:
+ raise ValueError(
+ f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}"
+ )
+
+ if not 0 <= id < len(compatible_templates):
+ templates_list_str = "\n".join(
+ f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates)
+ )
+ raise ValueError(
+ f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}"
+ )
+ template = compatible_templates[id]
+ elif isinstance(task, TaskTemplate):
+ template = task
+ else:
+ raise ValueError(
+ f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}."
+ )
+ template = template.align_with_features(self.info.features)
+ column_mapping = template.column_mapping
+ columns_to_drop = [column for column in self.column_names if column not in column_mapping]
+ dataset = self.remove_columns(columns_to_drop)
+ dataset = dataset.rename_columns(column_mapping)
+ # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
+ dataset.info.task_templates = None
+ dataset = dataset.cast(features=template.features)
+ return dataset
+
+ def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
+ """
+ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
+ """
+ if isinstance(key, bool):
+ raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
+ format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
+ format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
+ output_all_columns = (
+ kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
+ )
+ format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
+ format_kwargs = format_kwargs if format_kwargs is not None else {}
+ formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
+ pa_subtable = query_table(self._data, key, indices=self._indices)
+ formatted_output = format_table(
+ pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
+ )
+ return formatted_output
+
+ @overload
+ def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
+ ...
+
+ @overload
+ def __getitem__(self, key: str) -> List: # noqa: F811
+ ...
+
+ def __getitem__(self, key): # noqa: F811
+ """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
+ return self._getitem(key)
+
+ def __getitems__(self, keys: List) -> List:
+ """Can be used to get a batch using a list of integers indices."""
+ batch = self.__getitem__(keys)
+ n_examples = len(batch[next(iter(batch))])
+ return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
+
+ def cleanup_cache_files(self) -> int:
+ """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is
+ one.
+
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Returns:
+ `int`: Number of removed files.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cleanup_cache_files()
+ 10
+ ```
+ """
+ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
+ if not current_cache_files:
+ return 0
+ cache_directory = os.path.dirname(current_cache_files[0])
+ logger.info(f"Listing files in {cache_directory}")
+ files: List[str] = os.listdir(cache_directory)
+ files_to_remove = []
+ for f_name in files:
+ full_name = os.path.abspath(os.path.join(cache_directory, f_name))
+ if f_name.startswith("cache-") and f_name.endswith(".arrow"):
+ if full_name in current_cache_files:
+ logger.info(f"Keeping currently used cache file at {full_name}")
+ continue
+ files_to_remove.append(full_name)
+ for file_path in files_to_remove:
+ logger.info(f"Removing {file_path}")
+ os.remove(file_path)
+ return len(files_to_remove)
+
+ def _get_cache_file_path(self, fingerprint):
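+ # When caching is enabled and the dataset already has cache files, the new cache file is placed
+ # next to them and named after the fingerprint; otherwise a randomly named file in the temporary
+ # cache directory is used.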
+ if is_caching_enabled() and self.cache_files:
+ cache_file_name = "cache-" + fingerprint + ".arrow"
+ cache_directory = os.path.dirname(self.cache_files[0]["filename"])
+ else:
+ cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
+ cache_directory = get_temporary_cache_files_directory()
+ cache_file_path = os.path.join(cache_directory, cache_file_name)
+ return cache_file_path
+
+ @transmit_tasks
+ @transmit_format
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """
+ Apply a function to all the examples in the table (individually or in batches) and update the table.
+ If your function returns a column that already exists, then it overwrites it.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`): Function with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix
+ will be added at the end of the base name of each shard's cache file. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for
+ `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds[0:3]["text"]
+ ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
+ 'Review: the soundtrack alone is worth the price of admission .',
+ 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
+
+ # process a batch of examples (assumes a `tokenizer`, e.g. from `transformers.AutoTokenizer`)
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
+ """
+ if keep_in_memory and cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
+
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError("num_proc must be an integer > 0.")
+
+ # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
+ if len(self) == 0:
+ if self._indices is not None: # empty indices mapping
+ self = Dataset(
+ self.data.slice(0, 0),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ if remove_columns:
+ return self.remove_columns(remove_columns)
+ else:
+ return self
+
+ if function is None:
+ function = lambda x: x # noqa: E731
+
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ if input_columns is not None:
+ missing_columns = set(input_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+
+ if remove_columns is not None:
+ missing_columns = set(remove_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ if num_proc is not None and num_proc > len(self):
+ num_proc = len(self)
+ logger.warning(
+ f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
+ )
+
+ dataset_kwargs = {
+ "shard": self,
+ "function": function,
+ "with_indices": with_indices,
+ "with_rank": with_rank,
+ "input_columns": input_columns,
+ "batched": batched,
+ "batch_size": batch_size,
+ "drop_last_batch": drop_last_batch,
+ "remove_columns": remove_columns,
+ "keep_in_memory": keep_in_memory,
+ "writer_batch_size": writer_batch_size,
+ "features": features,
+ "disable_nullable": disable_nullable,
+ "fn_kwargs": fn_kwargs,
+ }
+
+ if new_fingerprint is None:
+ # we create a unique hash from the function,
+ # current dataset file and the mapping args
+ transform = format_transform_for_fingerprint(Dataset._map_single)
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
+ kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
+ new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ validate_fingerprint(new_fingerprint)
+ dataset_kwargs["new_fingerprint"] = new_fingerprint
+
+ if self.cache_files:
+ if cache_file_name is None:
+ cache_file_name = self._get_cache_file_path(new_fingerprint)
+ dataset_kwargs["cache_file_name"] = cache_file_name
+
+ def load_processed_shard_from_cache(shard_kwargs):
+ """Load a processed shard from cache if it exists, otherwise throw an error."""
+ shard = shard_kwargs["shard"]
+ # Check if we've already cached this computation (indexed by a hash)
+ if shard_kwargs["cache_file_name"] is not None:
+ if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
+ info = shard.info.copy()
+ info.features = features
+ info.task_templates = None
+ return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
+ raise NonExistentDatasetError
+
+ num_shards = num_proc if num_proc is not None else 1
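+        # With drop_last_batch, each shard skips its trailing partial batch, so the progress-bar
+        # total only counts the examples that will actually be processed (full batches per shard).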
+ if batched and drop_last_batch:
+ pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
+ else:
+ pbar_total = len(self)
+
+ shards_done = 0
+ if num_proc is None or num_proc == 1:
+ transformed_dataset = None
+ try:
+ transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
+ logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
+ except NonExistentDatasetError:
+ pass
+ if transformed_dataset is None:
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=desc or "Map",
+ ) as pbar:
+ for rank, done, content in Dataset._map_single(**dataset_kwargs):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_dataset = content
+ else:
+ pbar.update(content)
+ assert transformed_dataset is not None, "Failed to retrieve the result from map"
+ # update fingerprint if the dataset changed
+ if transformed_dataset._fingerprint != self._fingerprint:
+ transformed_dataset._fingerprint = new_fingerprint
+ return transformed_dataset
+ else:
+
+ def format_cache_file_name(
+ cache_file_name: Optional[str],
+ rank: Union[int, Literal["*"]], # noqa: F722
+ ) -> Optional[str]:
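+                # `rank` is either a worker index (used to build that worker's cache file name)
+                # or the string "*", which only produces a glob-like pattern used to log the
+                # location of all shard cache files.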
+ if not cache_file_name:
+ return cache_file_name
+ sep = cache_file_name.rindex(".")
+ base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
+ if isinstance(rank, int):
+ cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
+ logger.info(f"Process #{rank} will write at {cache_file_name}")
+ else:
+ cache_file_name = (
+ base_name
+ + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ + extension
+ )
+ return cache_file_name
+
+ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
+ new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
+ validate_fingerprint(new_fingerprint)
+ return new_fingerprint
+
+ prev_env = deepcopy(os.environ)
+            # check if parallelism is off
+ # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
+ if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
+ "",
+ "off",
+ "false",
+ "f",
+ "no",
+ "n",
+ "0",
+ ):
+ logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ shards = [
+ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
+ for rank in range(num_proc)
+ ]
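+            # One job per worker: each gets its own contiguous shard, cache file name,
+            # fingerprint suffix and index offset into the full dataset.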
+ kwargs_per_job = [
+ {
+ **dataset_kwargs,
+ "shard": shards[rank],
+ "cache_file_name": format_cache_file_name(cache_file_name, rank),
+ "rank": rank,
+ "offset": sum(len(s) for s in shards[:rank]),
+ "new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
+ }
+ for rank in range(num_shards)
+ ]
+
+ transformed_shards = [None] * num_shards
+ for rank in range(num_shards):
+ try:
+ transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
+ kwargs_per_job[rank] = None
+ except NonExistentDatasetError:
+ pass
+
+ kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
+
+            # We try to create a pool with as many workers as there are shards not yet cached.
+ if kwargs_per_job:
+ if len(kwargs_per_job) < num_shards:
+ logger.info(
+ f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
+ )
+ with Pool(len(kwargs_per_job)) as pool:
+ os.environ = prev_env
+ logger.info(f"Spawning {num_proc} processes")
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=(desc or "Map") + f" (num_proc={num_proc})",
+ ) as pbar:
+ for rank, done, content in iflatmap_unordered(
+ pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_shards[rank] = content
+ else:
+ pbar.update(content)
+ # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
+ for kwargs in kwargs_per_job:
+ del kwargs["shard"]
+ else:
+ logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
+ assert (
+ None not in transformed_shards
+ ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
+ logger.info(f"Concatenating {num_proc} shards")
+ result = _concatenate_map_style_datasets(transformed_shards)
+ # update fingerprint if the dataset changed
+ if any(
+ transformed_shard._fingerprint != shard._fingerprint
+ for transformed_shard, shard in zip(transformed_shards, shards)
+ ):
+ result._fingerprint = new_fingerprint
+ else:
+ result._fingerprint = self._fingerprint
+ return result
+
+ @staticmethod
+ def _map_single(
+ shard: "Dataset",
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ new_fingerprint: Optional[str] = None,
+ rank: Optional[int] = None,
+ offset: int = 0,
+ ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if function does update examples).
+
+ Args:
+ shard (`datasets.Dataset`): Dataset to map the transform on.
+            function (`Callable`): Callable with one of the following signatures:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+                If no function is provided, defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`): Provide batch of examples to `function`
+            batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
+ drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
+ fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
+ new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+            rank (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing.
+            offset (`int`, defaults to `0`): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
+ """
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ # If we do batch computation but no batch size is provided, default to the full dataset
+ if batched and (batch_size is None or batch_size <= 0):
+ batch_size = shard.num_rows
+
+ # We set this variable to True after processing the first example/batch in
+ # `apply_function_on_filtered_inputs` if the map function returns a dict.
+ # If set to False, no new arrow table will be created
+
+ update_data = None
+
+ format_kwargs = shard._format_kwargs.copy()
+ # Lazy formatting is only available for the default format (None/python)
+ if not input_columns and shard._format_type is None:
+ format_kwargs["lazy"] = True
+ input_formatter = get_formatter(
+ shard._format_type,
+ features=shard.features,
+ **format_kwargs,
+ )
+
+ class NumExamplesMismatchError(Exception):
+ pass
+
+ def validate_function_output(processed_inputs, indices):
+ """Validate output of the map function."""
+ if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)):
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
+ )
+ elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
+ allowed_batch_return_types = (list, np.ndarray, pd.Series)
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ allowed_batch_return_types += (tf.Tensor,)
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ allowed_batch_return_types += (torch.Tensor,)
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ allowed_batch_return_types += (jnp.ndarray,)
+ all_dict_values_are_lists = all(
+ isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
+ )
+ if all_dict_values_are_lists is False:
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
+ )
+
+ def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
+ """Utility to apply the function on a selection of columns."""
+ nonlocal update_data
+ inputs = format_table(
+ pa_inputs,
+ 0 if not batched else range(pa_inputs.num_rows),
+ format_columns=input_columns,
+ formatter=input_formatter,
+ )
+ fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
+ if offset == 0:
+ effective_indices = indices
+ else:
+ effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
+ additional_args = ()
+ if with_indices:
+ additional_args += (effective_indices,)
+ if with_rank:
+ additional_args += (rank,)
+ processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
+ if isinstance(processed_inputs, LazyDict):
+ processed_inputs = {
+ k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
+ }
+ returned_lazy_dict = True
+ else:
+ returned_lazy_dict = False
+ if update_data is None:
+ # Check if the function returns updated examples
+ update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
+ validate_function_output(processed_inputs, indices)
+ if not update_data:
+ return None # Nothing to update, let's move on
+ if shard._format_type or input_columns:
+ # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
+ inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
+ elif isinstance(inputs, LazyDict):
+ inputs_to_merge = {
+ k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
+ }
+ else:
+ inputs_to_merge = inputs
+ if remove_columns is not None:
+ for column in remove_columns:
+                    # `function` can modify the input in-place, causing the column to already be removed.
+ if column in inputs_to_merge:
+ inputs_to_merge.pop(column)
+ if returned_lazy_dict and column in processed_inputs:
+ processed_inputs.pop(column)
+ if check_same_num_examples:
+ input_num_examples = len(pa_inputs)
+ processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
+ if input_num_examples != processed_inputs_num_examples:
+ raise NumExamplesMismatchError()
+ if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
+ # The .map() transform *updates* the dataset:
+                # the output dictionary contains both the input data and the output data.
+ # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
+ return {**inputs_to_merge, **processed_inputs}
+ else:
+ return processed_inputs
+
+ def init_buffer_and_writer():
+ # Prepare output buffer and batched writer in memory or on file if we update the table
+ writer_features = features
+ if writer_features is None:
+ writer_features = shard.features
+ update_features = True
+ else:
+ update_features = False
+ if keep_in_memory or cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ features=writer_features,
+ stream=buf_writer,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching processed dataset at {cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
+ writer = ArrowWriter(
+ features=writer_features,
+ path=tmp_file.name,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ return buf_writer, writer, tmp_file
+
+ num_examples_progress_update = 0
+        # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
+ buf_writer, writer, tmp_file = None, None, None
+
+ # Optionally initialize the writer as a context manager
+ with contextlib.ExitStack() as stack:
+ try:
+ arrow_formatted_shard = shard.with_format("arrow")
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ if not batched:
+ shard_iterable = enumerate(arrow_formatted_shard)
+ else:
+ num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
+ shard_iterable = zip(
+ range(0, num_rows, batch_size),
+ arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
+ )
+ if not batched:
+ _time = time.time()
+ for i, example in shard_iterable:
+ example = apply_function_on_filtered_inputs(example, i, offset=offset)
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(example, pa.Table):
+ writer.write_row(example)
+ elif isinstance(example, pd.DataFrame):
+ writer.write_row(pa.Table.from_pandas(example))
+ else:
+ writer.write(example)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ else:
+ _time = time.time()
+ for i, batch in shard_iterable:
+ num_examples_in_batch = len(batch)
+ indices = list(
+ range(*(slice(i, i + batch_size).indices(shard.num_rows)))
+ ) # Something simpler?
+ try:
+ batch = apply_function_on_filtered_inputs(
+ batch,
+ indices,
+ check_same_num_examples=len(shard.list_indexes()) > 0,
+ offset=offset,
+ )
+ except NumExamplesMismatchError:
+ raise DatasetTransformationNotAllowedError(
+                            "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index()` to remove your index and then re-add it."
+ ) from None
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(batch, pa.Table):
+ writer.write_table(batch)
+ elif isinstance(batch, pd.DataFrame):
+ writer.write_table(pa.Table.from_pandas(batch))
+ else:
+ writer.write_batch(batch)
+ num_examples_progress_update += num_examples_in_batch
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ if update_data and writer is not None:
+ writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ yield rank, False, num_examples_progress_update
+ if update_data:
+ if writer is not None:
+ writer.finalize()
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ yield rank, False, num_examples_progress_update
+ if update_data and tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(cache_file_name, 0o666 & ~umask)
+
+ if update_data:
+ # Create new Dataset from buffer or file
+ info = shard.info.copy()
+ info.features = writer._features
+ info.task_templates = None
+ if buf_writer is None:
+ yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
+ else:
+ yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
+ else:
+ yield rank, True, shard
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
+ )
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if
+ `batched = True`. If `batched = False`, one example per batch is passed to `function`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ suffix_template (`str`):
+                If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each shard's cache file.
+ For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
+ the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
+ `_{rank:05d}_of_{num_proc:05d}`).
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+                Meaningful description to be displayed alongside the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
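+
+        # a sketch of the batched signature described above: `function` receives a dict of
+        # lists and returns a list of booleans
+        >>> ds.filter(lambda batch: [label == 1 for label in batch["label"]], batched=True)
+        Dataset({
+            features: ['text', 'label'],
+            num_rows: 533
+        })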
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`"
+ )
+
+ if function is None:
+ function = lambda x: True # noqa: E731
+
+ if len(self) == 0:
+ return self
+
+ indices = self.map(
+ function=partial(
+ get_indices_from_mask_function,
+ function,
+ batched,
+ with_indices,
+ with_rank,
+ input_columns,
+ self._indices,
+ ),
+ with_indices=True,
+ with_rank=True,
+ features=Features({"indices": Value("uint64")}),
+ batched=True,
+ batch_size=batch_size,
+ remove_columns=self.column_names,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ suffix_template=suffix_template,
+ new_fingerprint=new_fingerprint,
+ input_columns=input_columns,
+ desc=desc or "Filter",
+ )
+ new_dataset = copy.deepcopy(self)
+ new_dataset._indices = indices.data
+ new_dataset._fingerprint = new_fingerprint
+ return new_dataset
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+                Disallow null values in the table.
+ num_proc (`int`, optional, default `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
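+
+        Example (an illustrative sketch, assuming a split that was shuffled first and therefore
+        has an indices mapping):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="validation")
+        >>> ds = ds.shuffle(seed=42)  # creates an indices mapping
+        >>> ds = ds.flatten_indices()  # rewrites the shuffled rows as a contiguous table
+        ```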
+ """
+
+ return self.map(
+ batched=True, # for speed
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ new_fingerprint=new_fingerprint,
+ desc="Flattening the indices",
+ num_proc=num_proc,
+ )
+
+ def _new_dataset_with_indices(
+ self,
+ indices_cache_file_name: Optional[str] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
+ current Dataset.
+ """
+
+ if indices_cache_file_name is None and indices_buffer is None:
+ raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
+
+ if fingerprint is None:
+ raise ValueError("please specify a fingerprint for the dataset with indices")
+
+ if indices_cache_file_name is not None:
+ indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
+ else:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+
+ # Return new Dataset object
+ # don't forget to copy the objects
+ return Dataset(
+ self._data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def select(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+
+ Args:
+ indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
+ Range, list or 1D-array of integer indices for indexing.
+ If the indices correspond to a contiguous range, the Arrow table is simply sliced.
+                However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
+ but still faster than recreating an Arrow table made of the requested rows.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
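+
+        # non-contiguous indices create an indices mapping instead of slicing the Arrow table
+        # (an illustrative sketch)
+        >>> ds.select([0, 2, 5, 7])
+        Dataset({
+            features: ['text', 'label'],
+            num_rows: 4
+        })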
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # If indices is a PyArrow array, we convert to NumPy
+ if isinstance(indices, (pa.Array, pa.ChunkedArray)):
+ indices = indices.to_numpy().astype(np.int64)
+
+ # Convert generator objects to lists
+ if isinstance(indices, Iterator):
+ indices = list(indices)
+
+ # If the indices are contiguous, simply slice the arrow table
+ if isinstance(indices, range):
+ if _is_range_contiguous(indices) and indices.start >= 0:
+ start, length = indices.start, indices.stop - indices.start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+ else:
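+            # For other iterables, check whether the indices are consecutive integers starting
+            # from the first one, in which case the fast contiguous-slice path can still be used.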
+ try:
+ start = next(iter(indices))
+ except StopIteration:
+ # if `indices` is an empty iterable, we return an empty dataset
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+ if start >= 0:
+ counter_from_start = itertools.count(start=start)
+ if all(i == j for i, j in zip(indices, counter_from_start)):
+ length = next(counter_from_start) - start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+
+ # If not contiguous, we need to create a new indices mapping
+ return self._select_with_indices_mapping(
+ indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def _select_contiguous(
+ self,
+ start: int,
+ length: int,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows from a contiguous slice of data.
+        The slice is defined by its start index and its length.
+
+ Args:
+ start (`int`): start index.
+ length (`int`): length of the slice to select.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_contiguous(0, 4)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ _check_valid_indices_value(start, len(self))
+ _check_valid_indices_value(start + length - 1, len(self))
+ if self._indices is None or length == 0:
+ return Dataset(
+ self.data.slice(start, length),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ else:
+ return Dataset(
+ self.data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=self._indices.slice(start, length),
+ fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def _select_with_indices_mapping(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+ The new dataset is made by creating a new indices mapping on top of the main arrow table.
+
+ Args:
+ indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
+ keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_with_indices_mapping(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Prepare the writer for our indices arrow table
+ if keep_in_memory or indices_cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching indices mapping at {indices_cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False)
+ writer = ArrowWriter(
+ path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+
+ indices = indices if isinstance(indices, list) else list(indices)
+
+ size = len(self)
+ if indices:
+ _check_valid_indices_value(int(max(indices)), size=size)
+ _check_valid_indices_value(int(min(indices)), size=size)
+ else:
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+
+ indices_array = pa.array(indices, type=pa.uint64())
+ # Check if we need to convert indices
+ if self._indices is not None:
+ indices_array = self._indices.column(0).take(indices_array)
+
+ indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
+
+ with writer:
+ try:
+ writer.write_table(indices_table)
+ writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ if tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, indices_cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(indices_cache_file_name, 0o666 & ~umask)
+
+ # Return new Dataset object
+ if buf_writer is None:
+ return self._new_dataset_with_indices(
+ indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
+ )
+ else:
+ return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
+ def sort(
+ self,
+ column_names: Union[str, Sequence_[str]],
+ reverse: Union[bool, Sequence_[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
+ The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+                `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+            null_placement (`str`, defaults to `at_end`):
+                Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+
+ keep_in_memory (`bool`, defaults to `False`):
+                Keep the sorted indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ sorted indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+                A higher value gives smaller cache files, a lower value consumes less temporary memory.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='validation')
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Deprecation warning
+ if kind != "deprecated":
+ warnings.warn(
+ "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+
+        # Check proper format of `column_names` (normalize a single string to a list)
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ # Check proper format and length of reverse
+ if not isinstance(reverse, bool):
+ if len(reverse) != len(column_names):
+ raise ValueError(
+ "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
+ )
+ else:
+ reverse = [reverse] * len(column_names)
+
+ # Check whether column name(s) exist in dataset
+ for column in column_names:
+ if not isinstance(column, str) or column not in self._data.column_names:
+ raise ValueError(
+ f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
+ )
+
+        # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
+ if null_placement not in ["at_start", "at_end"]:
+ if null_placement == "first":
+ null_placement = "at_start"
+ elif null_placement == "last":
+ null_placement = "at_end"
+ else:
+ raise ValueError(
+ f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ sort_table = query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ )
+
+ sort_keys = [
+ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
+ ]
+
+ indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
+ )
+ def shuffle(
+ self,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new Dataset where the rows are shuffled.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
+
+ Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
+ However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
+ This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
+ To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
+        This may take a lot of time depending on the size of your dataset though:
+
+ ```python
+ my_dataset[0] # fast
+ my_dataset = my_dataset.shuffle(seed=42)
+ my_dataset[0] # up to 10x slower
+ my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
+ my_dataset[0] # fast again
+ ```
+
+ In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
+ It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
+
+ ```python
+ my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
+        for example in my_iterable_dataset:  # fast
+ pass
+
+ shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
+
+        for example in shuffled_iterable_dataset:  # as fast as before
+ pass
+ ```
+
+ Args:
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, default `False`):
+                Keep the shuffled indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ shuffled indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds['label'][:10]
+ [1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if seed is not None and generator is not None:
+ raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
+
+ if generator is not None and not isinstance(generator, np.random.Generator):
+ raise ValueError("The provided generator must be an instance of numpy.random.Generator")
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
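+        # If no generator is given, derive a seed from NumPy's global RNG state (advancing it by
+        # one step), so seeding the global RNG beforehand makes the shuffle reproducible.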
+ if generator is None:
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ permutation = generator.permutation(len(self))
+
+ return self.select(
+ indices=permutation,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False,
+ randomized_function=True,
+ fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
+ ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
+ )
+ def train_test_split(
+ self,
+ test_size: Union[float, int, None] = None,
+ train_size: Union[float, int, None] = None,
+ shuffle: bool = True,
+ stratify_by_column: Optional[str] = None,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ train_indices_cache_file_name: Optional[str] = None,
+ test_indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ train_new_fingerprint: Optional[str] = None,
+ test_new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
+ Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
+
+ This method is similar to scikit-learn `train_test_split`.
+
+ Args:
+            test_size (`int` or `float`, *optional*):
+                Size of the test split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
+ If `int`, represents the absolute number of test samples.
+ If `None`, the value is set to the complement of the train size.
+ If `train_size` is also `None`, it will be set to `0.25`.
+            train_size (`int` or `float`, *optional*):
+                Size of the train split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
+ If `int`, represents the absolute number of train samples.
+ If `None`, the value is automatically set to the complement of the test size.
+ shuffle (`bool`, *optional*, defaults to `True`):
+ Whether or not to shuffle the data before splitting.
+ stratify_by_column (`str`, *optional*, defaults to `None`):
+                The column name of the labels to be used to perform a stratified split of the data.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, defaults to `False`):
+                Keep the split indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the splits indices
+ can be identified, use it instead of recomputing.
+            train_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ train split indices instead of the automatically generated cache file name.
+            test_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ test split indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+                A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ train_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the train set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ test_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the test set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 852
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 214
+ })
+ })
+
+ # set a seed
+ >>> ds = ds.train_test_split(test_size=0.2, seed=42)
+
+ # stratified split
+ >>> ds = load_dataset("imdb",split="train")
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 25000
+ })
+ >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 20000
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 5000
+ })
+ })
+ ```
+ """
+ from .dataset_dict import DatasetDict # import here because of circular dependency
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return DatasetDict({"train": self, "test": self})
+
+ if test_size is None and train_size is None:
+ test_size = 0.25
+
+ # Safety checks similar to scikit-learn's ones.
+ # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
+ n_samples = len(self)
+ if (
+ isinstance(test_size, int)
+ and (test_size >= n_samples or test_size <= 0)
+ or isinstance(test_size, float)
+ and (test_size <= 0 or test_size >= 1)
+ ):
+ raise ValueError(
+ f"test_size={test_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if (
+ isinstance(train_size, int)
+ and (train_size >= n_samples or train_size <= 0)
+ or isinstance(train_size, float)
+ and (train_size <= 0 or train_size >= 1)
+ ):
+ raise ValueError(
+ f"train_size={train_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if train_size is not None and not isinstance(train_size, (int, float)):
+ raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
+ if test_size is not None and not isinstance(test_size, (int, float)):
+ raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
+
+ if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
+ raise ValueError(
+ f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
+ " range. Reduce test_size and/or train_size."
+ )
+
+ if isinstance(test_size, float):
+ n_test = ceil(test_size * n_samples)
+ elif isinstance(test_size, int):
+ n_test = float(test_size)
+
+ if isinstance(train_size, float):
+ n_train = floor(train_size * n_samples)
+ elif isinstance(train_size, int):
+ n_train = float(train_size)
+
+ if train_size is None:
+ n_train = n_samples - n_test
+ elif test_size is None:
+ n_test = n_samples - n_train
+
+ if n_train + n_test > n_samples:
+ raise ValueError(
+ f"The sum of train_size and test_size = {n_train + n_test}, "
+ "should be smaller than the number of "
+ f"samples {n_samples}. Reduce test_size and/or "
+ "train_size."
+ )
+
+ n_train, n_test = int(n_train), int(n_test)
+
+ if n_train == 0:
+ raise ValueError(
+ f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
+ "resulting train set will be empty. Adjust any of the "
+ "aforementioned parameters."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None and shuffle is True:
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+
+ if train_indices_cache_file_name is None:
+ train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
+ if test_indices_cache_file_name is None:
+ test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
+ if (
+ os.path.exists(train_indices_cache_file_name)
+ and os.path.exists(test_indices_cache_file_name)
+ and load_from_cache_file
+ ):
+ logger.info(
+ f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
+ )
+ return DatasetDict(
+ {
+ "train": self._new_dataset_with_indices(
+ fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
+ ),
+ "test": self._new_dataset_with_indices(
+ fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
+ ),
+ }
+ )
+ if not shuffle:
+ if stratify_by_column is not None:
+ raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
+ train_indices = np.arange(n_train)
+ test_indices = np.arange(n_train, n_train + n_test)
+ else:
+ # stratified partition
+ if stratify_by_column is not None:
+ if stratify_by_column not in self._info.features.keys():
+ raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
+ if not isinstance(self._info.features[stratify_by_column], ClassLabel):
+ raise ValueError(
+ f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
+ )
+ try:
+ train_indices, test_indices = next(
+ stratified_shuffle_split_generate_indices(
+ self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
+ )
+ )
+ except Exception as error:
+ if str(error) == "Minimum class count error":
+ raise ValueError(
+ f"The least populated class in {stratify_by_column} column has only 1"
+ " member, which is too few. The minimum"
+ " number of groups for any class cannot"
+ " be less than 2."
+ )
+ else:
+ raise error
+
+ # random partition
+ else:
+ permutation = generator.permutation(len(self))
+ test_indices = permutation[:n_test]
+ train_indices = permutation[n_test : (n_test + n_train)]
+
+ train_split = self.select(
+ indices=train_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=train_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=train_new_fingerprint,
+ )
+ test_split = self.select(
+ indices=test_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=test_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=test_new_fingerprint,
+ )
+
+ return DatasetDict({"train": train_split, "test": test_split})
+
+ def shard(
+ self,
+ num_shards: int,
+ index: int,
+ contiguous: bool = False,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "Dataset":
+ """Return the `index`-nth shard from dataset split into `num_shards` pieces.
+
+ This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
+ index mod `n = i`.
+
+ `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
+ so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
+ first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
+ `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
+ a dataset with the same order as the original.
+
+ Be sure to shard before using any randomizing operator (such as `shuffle`).
+ It is best if the shard operator is used early in the dataset pipeline.
+
+
+ Args:
+ num_shards (`int`):
+ How many shards to split the dataset into.
+ index (`int`):
+ Which shard to select and return.
+ contiguous (`bool`, defaults to `False`):
+ Whether to select contiguous blocks of indices for shards.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices of each shard instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ Higher values make the processing do fewer lookups, lower values consume less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 1066
+ })
+ >>> ds.shard(num_shards=2, index=0)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
+ """
+ if not 0 <= index < num_shards:
+ raise ValueError("index should be in [0, num_shards-1]")
+ if contiguous:
+ div = len(self) // num_shards
+ mod = len(self) % num_shards
+ start = div * index + min(index, mod)
+ end = start + div + (1 if index < mod else 0)
+ indices = range(start, end)
+ else:
+ indices = np.arange(index, len(self), num_shards)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ )
+
+ @deprecated()
+ def export(
+ self,
+ filename: str,
+ format: str = "tfrecord",
+ ):
+ """Writes the Arrow dataset to a TFRecord file.
+
+ The dataset must already be in `numpy` format. The records will be written with
+ keys from `dataset._format_columns`.
+
+ Args:
+ filename (`str`): The filename, including the `.tfrecord` extension, to write to.
+ format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as
+ TFRecords are the only option. This enables a more flexible function signature later.
+ """
+ try:
+ import tensorflow as tf # noqa: F401
+ except ImportError:
+ logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
+
+ # From https://www.tensorflow.org/tutorials/load_data/tfrecord
+ def _bytes_feature(values):
+ """Returns a bytes_list from a list of string / byte."""
+ return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
+
+ def _float_feature(values):
+ """Returns a float_list from a list of float / double."""
+ return tf.train.Feature(float_list=tf.train.FloatList(value=values))
+
+ def _int64_feature(values):
+ """Returns an int64_list from a list of bool / enum / int / uint."""
+ return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
+
+ def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature":
+ """Typechecks `values` and returns the corresponding tf.train.Feature."""
+ if isinstance(values, list):
+ if values and isinstance(values[0], str):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(f"values={values} is empty or contains items that cannot be serialized")
+ elif isinstance(values, np.ndarray):
+ if values.dtype == np.dtype(float):
+ return _float_feature(values)
+ elif values.dtype == np.int64:
+ return _int64_feature(values)
+ elif values.dtype == np.dtype(str) or (
+ values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)
+ ):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(
+ f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized"
+ )
+ elif hasattr(values, "dtype"):
+ if np.issubdtype(values.dtype, np.floating):
+ return _float_feature([values.item()])
+ elif np.issubdtype(values.dtype, np.integer):
+ return _int64_feature([values.item()])
+ elif np.issubdtype(values.dtype, str):
+ return _bytes_feature([values.item().encode()])
+ else:
+ raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized")
+ else:
+ raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized")
+
+ def serialize_example(ex):
+ feature = {key: _feature(value) for key, value in ex.items()}
+ example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
+ return example_proto.SerializeToString()
+
+ def tf_serialize_example(ex):
+ tf_string = tf.py_function(serialize_example, (ex,), tf.string)
+ return tf.reshape(tf_string, ())
+
+ def generator():
+ for ex in self:
+ yield serialize_example(ex)
+
+ if self._format_type != "numpy":
+ raise ValueError("Dataset format must be numpy before exporting")
+ if not filename.endswith(".tfrecord"):
+ raise ValueError("filename {filename} must end with .tfrecord")
+ tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
+ writer = tf.data.experimental.TFRecordWriter(filename)
+ logger.info(f"Writing TFRecord to {filename}")
+ writer.write(tf_dataset)
+ logger.info(f"Finished writing TFRecord to {filename}")
+ self = None # delete the dataset reference used by tf_dataset
+
+ def to_csv(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_csv_kwargs,
+ ) -> int:
+ """Exports the dataset to csv
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ **to_csv_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_csv("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetWriter
+
+ return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write()
+
+ def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
+ """Returns the dataset as a Python dict. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+
+ <Deprecated version="2.11.0">
+
+ Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
+
+ </Deprecated>
+
+ batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `dict` or `Iterator[dict]`
+
+ Example:
+
+ ```py
+ >>> ds.to_dict()
+ ```
+ """
+ if batched != "deprecated":
+ warnings.warn(
+ "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
+ FutureWarning,
+ )
+ else:
+ batched = False
+
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pydict()
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pydict()
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_list(self) -> list:
+ """Returns the dataset as a Python list.
+
+ Returns:
+ `list`
+
+ Example:
+
+ ```py
+ >>> ds.to_list()
+ ```
+ """
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pylist()
+
+ def to_json(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_json_kwargs,
+ ) -> int:
+ """Export the dataset to JSON Lines or JSON.
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ **to_json_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
+
+
+
+ Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`.
+
+ If you would like to write the index, pass `index=True`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_json("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetWriter
+
+ return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write()
+
+ def to_pandas(
+ self, batch_size: Optional[int] = None, batched: bool = False
+ ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
+ """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `pandas.DataFrame` or `Iterator[pandas.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_pandas()
+ ```
+ """
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_parquet(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ **parquet_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to parquet
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **parquet_writer_kwargs (additional keyword arguments):
+ Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_parquet("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetWriter
+
+ return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write()
+
+ def to_sql(
+ self,
+ name: str,
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ batch_size: Optional[int] = None,
+ **sql_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to a SQL database.
+
+ Args:
+ name (`str`):
+ Name of SQL table.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **sql_writer_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of records written.
+
+ Example:
+
+ ```py
+ >>> # con provided as a connection URI string
+ >>> ds.to_sql("data", "sqlite:///my_own_db.sql")
+ >>> # con provided as a sqlite3 connection object
+ >>> import sqlite3
+ >>> con = sqlite3.connect("my_own_db.sql")
+ >>> with con:
+ ... ds.to_sql("data", con)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.sql import SqlDatasetWriter
+
+ return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
+
+ def _estimate_nbytes(self) -> int:
+ dataset_nbytes = self.data.nbytes
+
+ # Find decodable columns, because if there are any, we need to
+ # adjust the dataset size computation (needed for sharding) to account for possible external files
+ decodable_columns = [
+ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
+ ]
+
+ if decodable_columns:
+ # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
+ extra_nbytes = 0
+
+ def extra_nbytes_visitor(array, feature):
+ nonlocal extra_nbytes
+ if isinstance(feature, (Audio, Image)):
+ for x in array.to_pylist():
+ if x is not None and x["bytes"] is None and x["path"] is not None:
+ size = xgetsize(x["path"])
+ extra_nbytes += size
+ extra_nbytes -= array.field("path").nbytes
+
+ table = self.with_format("arrow")[:1000]
+ table_visitor(table, extra_nbytes_visitor)
+
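+ # Extrapolate the extra bytes estimated on the first 1000 examples to the full table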
+ extra_nbytes = extra_nbytes * len(self.data) / len(table)
+ dataset_nbytes = dataset_nbytes + extra_nbytes
+
+ if self._indices is not None:
+ dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
+ return dataset_nbytes
+
+ @staticmethod
+ def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
+ for shard_idx, shard in enumerate(shards):
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ yield shard_idx, pa_table
+
+ @staticmethod
+ def _generate_tables_from_cache_file(filename: str):
+ for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
+ yield batch_idx, pa.Table.from_batches([batch])
+
+ def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
+ """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
+ This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
+
+ Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
+ Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
+ All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
+
+ Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
+ This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
+
+ To get the best speed performance, make sure your dataset doesn't have an indices mapping.
+ If this is the case, the data are not read contiguously, which can be slow sometimes.
+ You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
+
+ Args:
+ num_shards (`int`, defaults to `1`):
+ Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
+ and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
+ Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
+
+ Returns:
+ [`datasets.IterableDataset`]
+
+ Example:
+
+ Basic usage:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With lazy filtering and processing:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With sharding to enable efficient shuffling:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.filter(filter_fn).map(process_fn)
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader and shuffling:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ In a distributed setup like PyTorch DDP, with a PyTorch DataLoader and shuffling:
+ ```python
+ >>> from datasets.distributed import split_dataset_by_node
+ >>> ids = ds.to_iterable_dataset(num_shards=512)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With shuffling and multiple epochs:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> for epoch in range(n_epochs):
+ ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
+ ... for example in ids:
+ ... pass
+ ```
+ Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
+ """
+ from .iterable_dataset import ArrowExamplesIterable, IterableDataset
+
+ if self._format_type is not None:
+ raise NotImplementedError(
+ "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
+ )
+ if num_shards > len(self):
+ raise ValueError(
+ f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
+ )
+ if self._indices is not None:
+ logger.info(
+ "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
+ "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
+ )
+ shards = (
+ [copy.deepcopy(self)]
+ if num_shards == 1
+ else [
+ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
+ ]
+ )
+ ex_iterable = ArrowExamplesIterable(
+ Dataset._generate_tables_from_shards,
+ kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
+ )
+ return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
+
+ def _push_parquet_shards_to_hub(
+ self,
+ repo_id: str,
+ data_dir: str = "data",
+ split: Optional[str] = None,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> Tuple[List[CommitOperationAdd], int, int]:
+ """Pushes the dataset shards as Parquet files to the hub.
+
+ Returns:
+ additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards
+ uploaded_size (`int`): number of uploaded bytes to the repository
+ dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
+ """
+ # Find decodable columns, because if there are any, we need to:
+ # embed the bytes from the files in the shards
+ decodable_columns = (
+ [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
+ if embed_external_files
+ else []
+ )
+
+ dataset_nbytes = self._estimate_nbytes()
+
+ if num_shards is None:
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, 1)
+
+ shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
+
+ if decodable_columns:
+
+ def shards_with_embedded_external_files(shards):
+ for shard in shards:
+ format = shard.format
+ shard = shard.with_format("arrow")
+ shard = shard.map(
+ embed_table_storage,
+ batched=True,
+ batch_size=1000,
+ keep_in_memory=True,
+ )
+ shard = shard.with_format(**format)
+ yield shard
+
+ shards = shards_with_embedded_external_files(shards)
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ uploaded_size = 0
+ additions = []
+ for index, shard in hf_tqdm(
+ enumerate(shards),
+ desc="Uploading the dataset shards",
+ total=num_shards,
+ ):
+ shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
+ buffer = BytesIO()
+ shard.to_parquet(buffer)
+ uploaded_size += buffer.tell()
+ shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
+ preupload_lfs_files(
+ api,
+ repo_id=repo_id,
+ additions=[shard_addition],
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ additions.append(shard_addition)
+
+ return additions, uploaded_size, dataset_nbytes
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ split: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the dataset to the hub as a Parquet dataset.
+ The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to `False`.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`, defaults to "default"):
+ The configuration name (or subset) of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ split (`str`, *optional*):
+ The name of the split that will be given to that dataset. Defaults to `self.split`.
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*, defaults to `False`):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+ <Deprecated version="2.15.0">
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
+ a unit (like `"5MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset.push_to_hub("/")
+ >>> dataset_dict.push_to_hub("/", private=True)
+ >>> dataset.push_to_hub("/", max_shard_size="1GB")
+ >>> dataset.push_to_hub("/", num_shards=1024)
+ ```
+
+ If your dataset has multiple splits (e.g. train/validation/test):
+
+ ```python
+ >>> train_dataset.push_to_hub("/", split="train")
+ >>> val_dataset.push_to_hub("/", split="validation")
+ >>> # later
+ >>> dataset = load_dataset("/")
+ >>> train_dataset = dataset["train"]
+ >>> val_dataset = dataset["validation"]
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("/", "en")
+ >>> french_dataset.push_to_hub("/", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("/", "en")
+ >>> french_dataset = load_dataset("/", "fr")
+ ```
+ """
+ if config_name == "data":
+ raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
+
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+
+ if split is None:
+ split = str(self.split) if self.split is not None else "train"
+
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
+ repo_id=repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards,
+ create_pr=create_pr,
+ embed_external_files=embed_external_files,
+ )
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ deletions, deleted_size = [], 0
+ repo_splits = [] # use a list to keep the order of the splits
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token):
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ deleted_size += repo_file.size
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
+ info_to_dump = self.info.copy()
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = uploaded_size
+ info_to_dump.dataset_size = dataset_nbytes
+ info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict(
+ {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
+ )
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if dataset_infos and config_name in dataset_infos:
+ repo_info = dataset_infos[config_name]
+ else:
+ repo_info = None
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
+ repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ repo_info = None
+ # update the total info to dump from existing info
+ if repo_info is not None:
+ logger.info("Updating downloaded metadata with the new split.")
+ if repo_info.splits and list(repo_info.splits) != [split]:
+ if self._info.features != repo_info.features:
+ raise ValueError(
+ f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
+ )
+
+ if split in repo_info.splits:
+ repo_info.download_size -= deleted_size
+ repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
+
+ repo_info.download_checksums = None
+ repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
+ repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
+ repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
+ repo_info.splits.pop(split, None)
+ repo_info.splits[split] = SplitInfo(
+ split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
+ )
+ info_to_dump = repo_info
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ # update the metadata configs
+ if config_name in metadata_configs:
+ metadata_config = metadata_configs[config_name]
+ if "data_files" in metadata_config:
+ data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
+ else:
+ data_files_to_dump = {}
+ # add the new split
+ data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
+ metadata_config_to_dump = {
+ "data_files": [
+ {
+ "split": _split,
+ "path": _pattern[0] if len(_pattern) == 1 else _pattern,
+ }
+ for _split, _pattern in data_files_to_dump.items()
+ ]
+ }
+ else:
+ metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):
+ """Add column to Dataset.
+
+
+
+ Args:
+ name (`str`):
+ Column name.
+ column (`list` or `np.array`):
+ Column data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> more_text = ds["text"]
+ >>> ds.add_column(name="text_2", column=more_text)
+ Dataset({
+ features: ['text', 'label', 'text_2'],
+ num_rows: 1066
+ })
+ ```
+ """
+ column_table = InMemoryTable.from_pydict({name: column})
+ _check_column_names(self._data.column_names + column_table.column_names)
+ dataset = self.flatten_indices() if self._indices is not None else self
+ # Concatenate tables horizontally
+ table = concat_tables([dataset._data, column_table], axis=1)
+ # Update features
+ info = dataset.info.copy()
+ info.features.update(Features.from_arrow_schema(column_table.schema))
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ By default the index is done over the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ column (`str`):
+ The column of the vectors to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ By default it corresponds to `column`.
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`data-type`):
+ The dtype of the numpy arrays that are indexed.
+ Default is `np.float32`.
+
+ Example:
+
+ ```python
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
+ >>> ds_with_embeddings.add_faiss_index(column='embeddings')
+ >>> # query
+ >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ >>> # save index
+ >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> # load index
+ >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+ >>> # query
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ ```
+ """
+ with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
+ super().add_faiss_index(
+ column=column,
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+ return self
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ external_arrays (`np.array`):
+ If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`, *optional*):
+ Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False):
+ Enable the verbosity of the Faiss index.
+ dtype (`numpy.dtype`):
+ The dtype of the numpy arrays that are indexed. Default is np.float32.
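+
+ Example:
+
+ A minimal sketch, assuming random placeholder embeddings of dimension 128:
+
+ ```python
+ >>> import numpy as np
+ >>> embeddings = np.random.rand(len(ds), 128).astype(np.float32)
+ >>> ds.add_faiss_index_from_external_arrays(external_arrays=embeddings, index_name="embeddings")
+ >>> scores, retrieved_examples = ds.get_nearest_examples("embeddings", embeddings[0], k=10)
+ ```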
+ """
+ super().add_faiss_index_from_external_arrays(
+ external_arrays=external_arrays.astype(dtype),
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval. This is done in-place.
+
+ Args:
+ column (`str`):
+ The column of the documents to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`].
+ By default it corresponds to `column`.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host of where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port of where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_name (`str`, *optional*):
+ The elasticsearch index name used to create the index.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ Example:
+
+ ```python
+ >>> es_client = elasticsearch.Elasticsearch()
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
+ >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
+ ```
+ """
+ with self.formatted_as(type=None, columns=[column]):
+ super().add_elasticsearch_index(
+ column=column,
+ index_name=index_name,
+ host=host,
+ port=port,
+ es_client=es_client,
+ es_index_name=es_index_name,
+ es_index_config=es_index_config,
+ )
+ return self
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_item(self, item: dict, new_fingerprint: str):
+ """Add item to Dataset.
+
+
+
+ Args:
+ item (`dict`):
+ Item data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ >>> ds = ds.add_item(new_review)
+ >>> ds[-1]
+ {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ ```
+ """
+ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
+ # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
+ dset_features, item_features = _align_features(
+ [self._info.features, Features.from_arrow_schema(item_table.schema)]
+ )
+ # Cast to align the schemas of the tables and concatenate the tables
+ table = concat_tables(
+ [
+ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
+ item_table.cast(item_features.arrow_schema),
+ ]
+ )
+ if self._indices is None:
+ indices_table = None
+ else:
+ item_indices_array = pa.array([len(self._data)], type=pa.uint64())
+ item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
+ indices_table = concat_tables([self._indices, item_indices_table])
+ info = self.info.copy()
+ info.features.update(item_features)
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(
+ table,
+ info=info,
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=new_fingerprint,
+ )
+
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
+ """Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
+ This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
+ The alignment is done using the lowercase label names.
+
+ Args:
+ label2id (`dict`):
+ The label name to ID mapping to align the dataset with.
+ label_column (`str`):
+ The column name of labels to align on.
+
+ Example:
+
+ ```python
+ >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
+ >>> ds = load_dataset("glue", "mnli", split="train")
+ >>> # mapping to align with
+ >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
+ >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
+ ```
+
+ """
+ # Sanity checks
+ if label_column not in self._data.column_names:
+ raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
+
+ label_feature = self._info.features[label_column]
+ if not (
+ isinstance(label_feature, ClassLabel)
+ or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
+ ):
+ raise ValueError(
+ f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
+ )
+
+ # Sort input mapping by ID value to ensure the label names are aligned
+ label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
+ label_names = list(label2id.keys())
+ # Some label mappings use uppercase label names so we lowercase them during alignment
+ label2id = {k.lower(): v for k, v in label2id.items()}
+ int2str_function = (
+ label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
+ )
+
+ if isinstance(label_feature, ClassLabel):
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ int2str_function(label_id).lower() if label_id is not None else None
+ for label_id in batch[label_column]
+ ]
+ batch[label_column] = [
+ label2id[label_name] if label_name is not None else None for label_name in dset_label_names
+ ]
+ return batch
+
+ else:
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
+ for seq in batch[label_column]
+ ]
+ batch[label_column] = [
+ [label2id[label_name] if label_name is not None else None for label_name in seq]
+ for seq in dset_label_names
+ ]
+ return batch
+
+ features = self.features
+ features[label_column] = (
+ ClassLabel(num_classes=len(label_names), names=label_names)
+ if isinstance(label_feature, ClassLabel)
+ else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
+ )
+ return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
+
+
+def _concatenate_map_style_datasets(
+ dsets: List[Dataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+):
+ """
+ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`.
+ When you concatenate on axis 0, missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.Dataset]`): List of Datasets to concatenate.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_map_style_datasets([ds1, ds2])
+ ```
+ """
+ # Ignore datasets with no rows
+ if any(dset.num_rows > 0 for dset in dsets):
+ dsets = [dset for dset in dsets if dset.num_rows > 0]
+ else:
+ # Return first dataset if all datasets are empty
+ return dsets[0]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ if not all(dset.num_rows == dsets[0].num_rows for dset in dsets):
+ raise ValueError("Number of rows must match for all datasets")
+ _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])
+
+ # Find common format or reset format
+ format = dsets[0].format
+ if any(dset.format != format for dset in dsets):
+ format = {}
+ logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.")
+
+ def apply_offset_to_indices_table(table, offset):
+ if offset == 0:
+ return table
+ else:
+ array = table["indices"]
+ new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))
+ return InMemoryTable.from_arrays([new_array], names=["indices"])
+
+ # Concatenate indices if they exist
+ if any(dset._indices is not None for dset in dsets):
+ if axis == 0:
+ # Datasets with no indices tables are replaced with a dataset with an indices table in memory.
+ # Applying an offset to an indices table also brings the table in memory.
+ indices_tables = []
+ for i in range(len(dsets)):
+ if dsets[i]._indices is None:
+ dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i])))
+ indices_tables.append(dsets[i]._indices)
+
+ # An offset needs to be applied to the indices before concatenating
+ offset = 0
+ for i in range(len(dsets)):
+ indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)
+ offset += len(dsets[i]._data)
+
+ # Concatenate indices
+ indices_tables = [t for t in indices_tables if len(t) > 0]
+ if indices_tables:
+ indices_table = concat_tables(indices_tables)
+ else:
+ indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()}))
+ else:
+ if len(dsets) == 1:
+ indices_table = dsets[0]._indices
+ else:
+ for i in range(len(dsets)):
+ dsets[i] = dsets[i].flatten_indices()
+ indices_table = None
+ else:
+ indices_table = None
+
+ table = concat_tables([dset._data for dset in dsets], axis=axis)
+ if axis == 0:
+ features_list = _align_features([dset.features for dset in dsets])
+ else:
+ features_list = [dset.features for dset in dsets]
+ table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})
+
+ # Concatenate infos
+ if info is None:
+ info = DatasetInfo.from_merge([dset.info for dset in dsets])
+ fingerprint = update_fingerprint(
+ "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split}
+ )
+
+ # Make final concatenated dataset
+ concatenated_dataset = Dataset(
+ table,
+ info=info,
+ split=split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+ concatenated_dataset.set_format(**format)
+ return concatenated_dataset
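+
+# A minimal sketch of the two concatenation axes handled by `_concatenate_map_style_datasets`,
+# using toy datasets built with `Dataset.from_dict` (the variable names below are illustrative only):
+#
+#     ds1 = Dataset.from_dict({"text": ["a", "b"]})
+#     ds2 = Dataset.from_dict({"text": ["c"]})
+#     ds3 = Dataset.from_dict({"label": [0, 1]})
+#     _concatenate_map_style_datasets([ds1, ds2])["text"]      # -> ["a", "b", "c"]
+#     _concatenate_map_style_datasets([ds1, ds3], axis=1)[0]   # -> {"text": "a", "label": 0}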
+
+
+def _interleave_map_style_datasets(
+ datasets: List["Dataset"],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ **kwargs,
+) -> "Dataset":
+ """
+ Interleave several map-style datasets (sources) into a single map-style dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+ If `probabilities = None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time, according to the provided probabilities.
+
+ Args:
+ datasets (`List[Dataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+ **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
+
+ Output:
+ :class:`datasets.Dataset`
+ """
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(
+ f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
+ )
+
+ # To interleave the datasets, we concatenate them and then we re-order the indices
+ concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
+
+ # Let's now build the indices to pass to .select()
+ lengths = [len(dset) for dset in datasets]
+ offsets = np.cumsum([0] + lengths[:-1])
+
+ # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
+ oversampling = stopping_strategy == "all_exhausted"
+
+ if probabilities is None and not oversampling:
+ # Undersampling situation with cycling between each source
+ # Example: if the lengths of the datasets are [3, 4, 5]
+ # then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
+ # Note that we only keep 3 examples per dataset since the shortest dataset runs out of examples first
+
+ # Reasoning behind the following operation: keeping the min_length first indices of each dataset
+ # while offsetting in order to correspond to the right indices of the concatenated dataset
+ # and flattening to effectively interleave the datasets
+ indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
+ elif probabilities is None:
+ # Oversampling situation with cycling between each source
+ # With the same example lengths [3, 4, 5], the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
+ # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
+
+ # Reasoning behind the following operation: for each dataset indices (i.e column) repeat the indices to have max_length indices per dataset
+ # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
+ indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
+
+ # We have to keep the indices to their respective dataset offsets and to flatten to effectively interleave the datasets
+ indices = (indices + offsets).flatten().tolist()
+
+ else:
+ # boolean array indicating, at index i, whether dataset_i has been fully exhausted
+ is_exhausted = np.full(len(lengths), False)
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ bool_strategy_func = np.all if oversampling else np.any
+
+ def iter_random_indices():
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ rng = np.random.default_rng(seed)
+ while True:
+ yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
+
+ current_index = [0] * len(datasets)
+ indices = []
+ for source_idx in iter_random_indices():
+ # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
+ # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
+ if bool_strategy_func(is_exhausted):
+ # the stopping condition was reached, let's stop
+ break
+
+ # let's add the example at the current index of the `source_idx`-th dataset
+ indices.append(current_index[source_idx] + offsets[source_idx])
+ current_index[source_idx] += 1
+
+ # we've run out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0
+ if current_index[source_idx] >= lengths[source_idx]:
+ is_exhausted[source_idx] = True
+ current_index[source_idx] = 0
+
+ return concatenated_datasets.select(indices, **kwargs)
+
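+# A small sketch of the two non-probabilistic cases above, with toy datasets of lengths 2 and 3
+# (variable names are illustrative only):
+#
+#     ds1 = Dataset.from_dict({"a": [0, 1]})
+#     ds2 = Dataset.from_dict({"a": [10, 11, 12]})
+#     _interleave_map_style_datasets([ds1, ds2])["a"]
+#     # first_exhausted (default) -> [0, 10, 1, 11]: stops once ds1 is exhausted
+#     _interleave_map_style_datasets([ds1, ds2], stopping_strategy="all_exhausted")["a"]
+#     # all_exhausted -> [0, 10, 1, 11, 0, 12]: ds1 indices cycle until ds2 is exhausted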
+
+def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ Args:
+ dataset ([`Dataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`]: The dataset to be used on the node at rank `rank`.
+ """
+ return dataset.shard(num_shards=world_size, index=rank, contiguous=True)
+
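+# A minimal sketch of the contiguous sharding used here, assuming an 8-row toy dataset
+# (names below are illustrative only):
+#
+#     ds = Dataset.from_dict({"i": list(range(8))})
+#     _split_by_node_map_style_dataset(ds, rank=0, world_size=4)["i"]   # -> [0, 1]
+#     _split_by_node_map_style_dataset(ds, rank=3, world_size=4)["i"]   # -> [6, 7]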
+
+# This is outside Dataset.filter as it needs to be picklable for multiprocessing
+
+
+def get_indices_from_mask_function(
+ function: Callable,
+ batched: bool,
+ with_indices: bool,
+ with_rank: bool,
+ input_columns: Optional[Union[str, List[str]]],
+ indices_mapping: Optional[Table] = None,
+ *args,
+ **fn_kwargs,
+):
+ if batched:
+ # we extract indices and rank from args
+ *inputs, indices, rank = args
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices,)
+ if with_rank:
+ additional_args += (rank,)
+ mask = function(*inputs, *additional_args, **fn_kwargs)
+ else:
+ # we get batched data (to do less look-ups) but `function` only accepts one example
+ # therefore we need to call `function` on each example of the batch to get the mask
+ *inputs, indices, rank = args
+ mask = []
+ if input_columns is None:
+ # inputs only contains a batch of examples
+ batch: dict = inputs[0]
+ num_examples = len(batch[next(iter(batch.keys()))])
+ for i in range(num_examples):
+ example = {key: batch[key][i] for key in batch}
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(example, *additional_args, **fn_kwargs))
+ else:
+ # inputs is a list of columns
+ columns: List[List] = inputs
+ num_examples = len(columns[0])
+ for i in range(num_examples):
+ input = [column[i] for column in columns]
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(*input, *additional_args, **fn_kwargs))
+ indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
+ if indices_mapping is not None:
+ indices_array = pa.array(indices_array, type=pa.uint64())
+ indices_array = indices_mapping.column(0).take(indices_array)
+ indices_array = indices_array.to_pylist()
+ return {"indices": indices_array}
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/arrow_reader.py b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ac14e28ce64bded311dc0f88a83edabc75c12bd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_reader.py
@@ -0,0 +1,661 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Arrow ArrowReader."""
+
+import copy
+import math
+import os
+import re
+import shutil
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, List, Optional, Union
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+from tqdm.contrib.concurrent import thread_map
+
+from .download.download_config import DownloadConfig
+from .naming import _split_re, filenames_for_dataset_split
+from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import cached_path
+
+
+if TYPE_CHECKING:
+ from .info import DatasetInfo # noqa: F401
+ from .splits import Split, SplitInfo # noqa: F401
+
+
+logger = logging.get_logger(__name__)
+
+HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
+
+_SUB_SPEC_RE = re.compile(
+ rf"""
+^
+ (?P<split>{_split_re[1:-1]})
+ (\[
+ ((?P<from>-?\d+)
+ (?P<from_pct>%)?)?
+ :
+ ((?P<to>-?\d+)
+ (?P<to_pct>%)?)?
+ \])?(\((?P<rounding>[^\)]*)\))?
+$
+""", # remove ^ and $
+ re.X,
+)
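+# For example, the verbose regex above parses "train[10%:20%](pct1_dropremainder)" into
+# split="train", from="10", from_pct="%", to="20", to_pct="%", rounding="pct1_dropremainder".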
+
+_ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
+
+
+class DatasetNotOnHfGcsError(ConnectionError):
+ """When you can't get the dataset from the Hf google cloud storage"""
+
+ pass
+
+
+class MissingFilesOnHfGcsError(ConnectionError):
+ """When some files are missing on the Hf oogle cloud storage"""
+
+ pass
+
+
+@dataclass(frozen=True)
+class FileInstructions:
+ """The file instructions associated with a split ReadInstruction.
+
+ Attributes:
+ num_examples: `int`, The total number of examples
+ file_instructions: List[dict(filename, skip, take)], the files information.
+ The filenames contain the relative path, not the absolute one.
+ skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ """
+
+ num_examples: int
+ file_instructions: List[dict]
+
+
+def make_file_instructions(
+ name: str,
+ split_infos: List["SplitInfo"],
+ instruction: Union[str, "ReadInstruction"],
+ filetype_suffix: Optional[str] = None,
+ prefix_path: Optional[str] = None,
+) -> FileInstructions:
+ """Returns instructions of the split dict.
+
+ Args:
+ name (`str`): Name of the dataset.
+ split_infos (`list` of `[SplitInfo]`): Dataset splits information.
+ instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset.
+ filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'.
+ prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name.
+
+ Returns:
+ [`FileInstructions`]
+ """
+ if not isinstance(name, str):
+ raise TypeError(f"Expected str 'name', but got: {type(name).__name__}")
+ elif not name:
+ raise ValueError("Expected non-empty str 'name'")
+ name2len = {info.name: info.num_examples for info in split_infos}
+ name2shard_lengths = {info.name: info.shard_lengths for info in split_infos}
+ name2filenames = {
+ info.name: filenames_for_dataset_split(
+ path=prefix_path,
+ dataset_name=name,
+ split=info.name,
+ filetype_suffix=filetype_suffix,
+ shard_lengths=name2shard_lengths[info.name],
+ )
+ for info in split_infos
+ }
+ if not isinstance(instruction, ReadInstruction):
+ instruction = ReadInstruction.from_spec(instruction)
+ # Create the absolute instruction (per split)
+ absolute_instructions = instruction.to_absolute(name2len)
+
+ # For each split, return the files instruction (skip/take)
+ file_instructions = []
+ num_examples = 0
+ for abs_instr in absolute_instructions:
+ split_length = name2len[abs_instr.splitname]
+ filenames = name2filenames[abs_instr.splitname]
+ shard_lengths = name2shard_lengths[abs_instr.splitname]
+ from_ = 0 if abs_instr.from_ is None else abs_instr.from_
+ to = split_length if abs_instr.to is None else abs_instr.to
+ if shard_lengths is None: # not sharded
+ for filename in filenames:
+ take = to - from_
+ if take == 0:
+ continue
+ num_examples += take
+ file_instructions.append({"filename": filename, "skip": from_, "take": take})
+ else: # sharded
+ index_start = 0 # Beginning (included) of moving window.
+ index_end = 0 # End (excluded) of moving window.
+ for filename, shard_length in zip(filenames, shard_lengths):
+ index_end += shard_length
+ if from_ < index_end and to > index_start: # There is something to take.
+ skip = from_ - index_start if from_ > index_start else 0
+ take = to - index_start - skip if to < index_end else -1
+ if take == 0:
+ continue
+ file_instructions.append({"filename": filename, "skip": skip, "take": take})
+ num_examples += shard_length - skip if take == -1 else take
+ index_start += shard_length
+ return FileInstructions(
+ num_examples=num_examples,
+ file_instructions=file_instructions,
+ )
+
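+# A small sketch of the skip/take arithmetic above, assuming a single "train" split stored in
+# two shards of 100 rows each (`SplitInfo` comes from `.splits`; only the relevant fields are shown):
+#
+#     split_infos = [SplitInfo(name="train", num_examples=200, shard_lengths=[100, 100])]
+#     make_file_instructions("my_dataset", split_infos, "train[150:180]", filetype_suffix="arrow", prefix_path="")
+#     # -> FileInstructions(num_examples=30,
+#     #                     file_instructions=[{"filename": <second shard file>, "skip": 50, "take": 30}])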
+
+class BaseReader:
+ """
+ Build a Dataset object out of Instruction instance(s).
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ArrowReader.
+
+ Args:
+ path (str): path where the dataset files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ self._path: str = path
+ self._info: Optional["DatasetInfo"] = info
+ self._filetype_suffix: Optional[str] = None
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ raise NotImplementedError
+
+ def _read_files(self, files, in_memory=False) -> Table:
+ """Returns Dataset for given file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+ The filenames contain the absolute path, not relative.
+ skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ in_memory (bool, default False): Whether to copy the data in-memory.
+ """
+ if len(files) == 0 or not all(isinstance(f, dict) for f in files):
+ raise ValueError("please provide valid file informations")
+ files = copy.deepcopy(files)
+ for f in files:
+ f["filename"] = os.path.join(self._path, f["filename"])
+
+ pa_tables = thread_map(
+ partial(self._get_table_from_filename, in_memory=in_memory),
+ files,
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset shards",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(files) <= 16 or None,
+ )
+ pa_tables = [t for t in pa_tables if len(t) > 0]
+ if not pa_tables and (self._info is None or self._info.features is None):
+ raise ValueError(
+ "Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
+ )
+ pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
+ pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
+ return pa_table
+
+ def get_file_instructions(self, name, instruction, split_infos):
+ """Return list of dict {'filename': str, 'skip': int, 'take': int}"""
+ file_instructions = make_file_instructions(
+ name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
+ )
+ files = file_instructions.file_instructions
+ return files
+
+ def read(
+ self,
+ name,
+ instructions,
+ split_infos,
+ in_memory=False,
+ ):
+ """Returns Dataset instance(s).
+
+ Args:
+ name (str): name of the dataset.
+ instructions (ReadInstruction): instructions to read.
+ Instructions can be a string, in which case they are passed to the
+ ReadInstruction constructor as is.
+ split_infos (list of SplitInfo proto): the available splits for dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a single Dataset instance.
+ """
+
+ files = self.get_file_instructions(name, instructions, split_infos)
+ if not files:
+ msg = f'Instruction "{instructions}" corresponds to no data!'
+ raise ValueError(msg)
+ return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
+
+ def read_files(
+ self,
+ files: List[dict],
+ original_instructions: Union[None, "ReadInstruction", "Split"] = None,
+ in_memory=False,
+ ):
+ """Returns single Dataset instance for the set of file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+ The filenames contain the relative path, not the absolute one.
+ skip/take indicates which examples to read in the file: `ds.skip().take()`
+ original_instructions: store the original instructions used to build the dataset split in the dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a Dataset instance.
+ """
+ # Prepend path to filename
+ pa_table = self._read_files(files, in_memory=in_memory)
+ # If original_instructions is not None, convert it to a human-readable NamedSplit
+ if original_instructions is not None:
+ from .splits import Split # noqa
+
+ split = Split(str(original_instructions))
+ else:
+ split = None
+ dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
+ return dataset_kwargs
+
+ def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir):
+ """
+ Download the dataset files from the Hf GCS
+
+ Args:
+ download_config: `DownloadConfig`, the download configuration (cache directory, proxies, etc.) used to download files
+ relative_data_dir: `str`, the relative directory of the remote files from
+ the `datasets` directory on GCS.
+
+ """
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ try:
+ remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
+ downloaded_dataset_info = cached_path(
+ remote_dataset_info.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json"))
+ if self._info is not None:
+ self._info.update(self._info.from_directory(self._path))
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ try:
+ for split in self._info.splits:
+ file_instructions = self.get_file_instructions(
+ name=self._info.builder_name,
+ instruction=split,
+ split_infos=self._info.splits.values(),
+ )
+ for file_instruction in file_instructions:
+ file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path))
+ remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download)
+ downloaded_prepared_filename = cached_path(
+ remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_prepared_filename, file_instruction["filename"])
+ except FileNotFoundError as err:
+ raise MissingFilesOnHfGcsError(err) from None
+
+
+class ArrowReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ArrowReader.
+
+ Args:
+ path (str): path where Arrow files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "arrow"
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+ if take == -1:
+ take = len(table) - skip
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(table)):
+ table = table.slice(skip, take)
+ return table
+
+ @staticmethod
+ def read_table(filename, in_memory=False) -> Table:
+ """
+ Read table from file.
+
+ Args:
+ filename (str): File name of the table.
+ in_memory (bool, default=False): Whether to copy the data in-memory.
+
+ Returns:
+ pyarrow.Table
+ """
+ table_cls = InMemoryTable if in_memory else MemoryMappedTable
+ return table_cls.from_file(filename)
+
+
+class ParquetReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses memory mapping on parquet files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ParquetReader.
+
+ Args:
+ path (str): path where parquet files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "parquet"
+
+ def _get_table_from_filename(self, filename_skip_take, **kwargs):
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ # Parquet read_table always loads data in memory, independently of memory_map
+ pa_table = pq.read_table(filename, memory_map=True)
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
+ pa_table = pa_table.slice(skip, take)
+ return pa_table
+
+
+@dataclass(frozen=True)
+class _AbsoluteInstruction:
+ """A machine friendly slice: defined absolute positive boundaries."""
+
+ splitname: str
+ from_: int # uint (starting index).
+ to: int # uint (ending index).
+
+
+@dataclass(frozen=True)
+class _RelativeInstruction:
+ """Represents a single parsed slicing instruction, can use % and negatives."""
+
+ splitname: str
+ from_: Optional[int] = None # int (starting index) or None if no lower boundary.
+ to: Optional[int] = None # int (ending index) or None if no upper boundary.
+ unit: Optional[str] = None
+ rounding: Optional[str] = None
+
+ def __post_init__(self):
+ if self.unit is not None and self.unit not in ["%", "abs"]:
+ raise ValueError("unit must be either % or abs")
+ if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
+ raise ValueError("rounding must be either closest or pct1_dropremainder")
+ if self.unit != "%" and self.rounding is not None:
+ raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
+ if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ if self.unit == "%" and self.to is not None and abs(self.to) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ # Update via __dict__ due to instance being "frozen"
+ self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding
+
+
+def _str_to_read_instruction(spec):
+ """Returns ReadInstruction for given string."""
+ res = _SUB_SPEC_RE.match(spec)
+ if not res:
+ raise ValueError(f"Unrecognized instruction format: {spec}")
+ unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
+ return ReadInstruction(
+ split_name=res.group("split"),
+ rounding=res.group("rounding"),
+ from_=int(res.group("from")) if res.group("from") else None,
+ to=int(res.group("to")) if res.group("to") else None,
+ unit=unit,
+ )
+
+
+def _pct_to_abs_pct1(boundary, num_examples):
+ # Using math.trunc here, since -99.5% should give -99%, not -100%.
+ if num_examples < 100:
+ msg = (
+ 'Using "pct1_dropremainder" rounding on a split with less than 100 '
+ "elements is forbidden: it always results in an empty dataset."
+ )
+ raise ValueError(msg)
+ return boundary * math.trunc(num_examples / 100.0)
+
+
+def _pct_to_abs_closest(boundary, num_examples):
+ return int(round(boundary * num_examples / 100.0))
+
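+# A quick numeric sketch of the two rounding modes, for a split of 999 examples:
+#
+#     _pct_to_abs_closest(33, 999)   # -> 330  (round(33 * 999 / 100))
+#     _pct_to_abs_pct1(33, 999)      # -> 297  (33 * math.trunc(999 / 100) = 33 * 9)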
+
+def _rel_to_abs_instr(rel_instr, name2len):
+ """Returns _AbsoluteInstruction instance for given RelativeInstruction.
+
+ Args:
+ rel_instr: RelativeInstruction instance.
+ name2len: dict {split_name: num_examples}.
+ """
+ pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
+ split = rel_instr.splitname
+ if split not in name2len:
+ raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
+ num_examples = name2len[split]
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ if rel_instr.unit == "%":
+ from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
+ to = num_examples if to is None else pct_to_abs(to, num_examples)
+ else:
+ from_ = 0 if from_ is None else from_
+ to = num_examples if to is None else to
+ if from_ < 0:
+ from_ = max(num_examples + from_, 0)
+ if to < 0:
+ to = max(num_examples + to, 0)
+ from_ = min(from_, num_examples)
+ to = min(to, num_examples)
+ return _AbsoluteInstruction(split, from_, to)
+
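+# A worked example of the resolution above, assuming a 1000-example "train" split:
+#
+#     rel = _RelativeInstruction("train", from_=-10, to=None, unit="%", rounding="closest")
+#     _rel_to_abs_instr(rel, {"train": 1000})   # -> _AbsoluteInstruction("train", 900, 1000)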
+
+class ReadInstruction:
+ """Reading instruction for a dataset.
+
+ Examples::
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%'))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%]+train[1:-1]'))
+ ds = datasets.load_dataset('mnist', split=(
+ datasets.ReadInstruction('test', to=33, unit='%') +
+ datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%](pct1_dropremainder)'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
+
+ # 10-fold validation:
+ tests = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
+ for k in range(0, 100, 10)])
+ trains = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
+ for k in range(0, 100, 10)])
+
+ """
+
+ def _init(self, relative_instructions):
+ # Private initializer.
+ self._relative_instructions = relative_instructions
+
+ @classmethod
+ def _read_instruction_from_relative_instructions(cls, relative_instructions):
+ """Returns ReadInstruction obj initialized with relative_instructions."""
+ # Use __new__ to bypass __init__, which is used by the public API and is not convenient here.
+ result = cls.__new__(cls)
+ result._init(relative_instructions) # pylint: disable=protected-access
+ return result
+
+ def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
+ """Initialize ReadInstruction.
+
+ Args:
+ split_name (str): name of the split to read. Eg: 'train'.
+ rounding (str, optional): The rounding behaviour to use when percent slicing is
+ used. Ignored when slicing with absolute indices.
+ Possible values:
+ - 'closest' (default): The specified percentages are rounded to the
+ closest value. Use this if you want the specified percents to be as
+ exact as possible.
+ - 'pct1_dropremainder': the specified percentages are treated as
+ multiples of 1%. Use this option if you want consistency. E.g.:
+ len(5%) == 5 * len(1%).
+ Using this option, one might not be able to use the full set of
+ examples, if the number of those is not a multiple of 100.
+ from_ (int):
+ to (int): alternative way of specifying slicing boundaries. If any of
+ {from_, to, unit} arguments is used, slicing cannot be specified as
+ a string.
+ unit (str): optional, one of:
+ '%': to set the slicing unit as percents of the split size.
+ 'abs': to set the slicing unit as absolute numbers.
+ """
+ # This constructor is not always called. See factory method
+ # `_read_instruction_from_relative_instructions`. Common init instructions
+ # MUST be placed in the _init method.
+ self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
+
+ @classmethod
+ def from_spec(cls, spec):
+ """Creates a `ReadInstruction` instance out of a string spec.
+
+ Args:
+ spec (`str`):
+ Split(s) + optional slice(s) to read + optional rounding
+ if percents are used as the slicing unit. A slice can be specified,
+ using absolute numbers (`int`) or percentages (`int`).
+
+ Examples:
+
+ ```
+ test: test split.
+ test + validation: test split + validation split.
+ test[10:]: test split, minus its first 10 records.
+ test[:10%]: first 10% records of test split.
+ test[:20%](pct1_dropremainder): first 20% records, rounded with the pct1_dropremainder rounding.
+ test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
+ ```
+
+ Returns:
+ ReadInstruction instance.
+ """
+ spec = str(spec) # Need to convert to str in case of NamedSplit instance.
+ subs = _ADDITION_SEP_RE.split(spec)
+ if not subs:
+ raise ValueError(f"No instructions could be built out of {spec}")
+ instruction = _str_to_read_instruction(subs[0])
+ return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
+
+ def to_spec(self):
+ rel_instr_specs = []
+ for rel_instr in self._relative_instructions:
+ rel_instr_spec = rel_instr.splitname
+ if rel_instr.from_ is not None or rel_instr.to is not None:
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ unit = rel_instr.unit
+ rounding = rel_instr.rounding
+ unit = unit if unit == "%" else ""
+ from_ = str(from_) + unit if from_ is not None else ""
+ to = str(to) + unit if to is not None else ""
+ slice_str = f"[{from_}:{to}]"
+ rounding_str = (
+ f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
+ )
+ rel_instr_spec += slice_str + rounding_str
+ rel_instr_specs.append(rel_instr_spec)
+ return "+".join(rel_instr_specs)
+
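+ # A round-trip sketch of `from_spec` and `to_spec` (the spec string is chosen for illustration):
+ #
+ #     ri = ReadInstruction.from_spec("test[:33%]+train[1:-1]")
+ #     ri.to_spec()   # -> "test[:33%]+train[1:-1]"
+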
+ def __add__(self, other):
+ """Returns a new ReadInstruction obj, result of appending other to self."""
+ if not isinstance(other, ReadInstruction):
+ msg = "ReadInstruction can only be added to another ReadInstruction obj."
+ raise TypeError(msg)
+ self_ris = self._relative_instructions
+ other_ris = other._relative_instructions # pylint: disable=protected-access
+ if (
+ self_ris[0].unit != "abs"
+ and other_ris[0].unit != "abs"
+ and self._relative_instructions[0].rounding != other_ris[0].rounding
+ ):
+ raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
+ return self._read_instruction_from_relative_instructions(self_ris + other_ris)
+
+ def __str__(self):
+ return self.to_spec()
+
+ def __repr__(self):
+ return f"ReadInstruction({self._relative_instructions})"
+
+ def to_absolute(self, name2len):
+ """Translate instruction into a list of absolute instructions.
+
+ Those absolute instructions are then to be added together.
+
+ Args:
+ name2len (`dict`):
+ Associating split names to number of examples.
+
+ Returns:
+ list of _AbsoluteInstruction instances (corresponds to the + in spec).
+ """
+ return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/arrow_writer.py b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fed7e14e389174657b68c74c6043177e46f7302
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/arrow_writer.py
@@ -0,0 +1,745 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""To write records into Parquet files."""
+
+import errno
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+import fsspec
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+from . import config
+from .features import Features, Image, Value
+from .features.features import (
+ FeatureType,
+ _ArrayXDExtensionType,
+ cast_to_python_objects,
+ generate_from_arrow_type,
+ get_nested_type,
+ list_of_np_array_to_pyarrow_listarray,
+ numpy_to_pyarrow_listarray,
+ to_pyarrow_listarray,
+)
+from .filesystems import is_remote_filesystem
+from .info import DatasetInfo
+from .keyhash import DuplicatedKeysError, KeyHasher
+from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import hash_url_to_filename
+from .utils.py_utils import asdict, first_non_null_value
+
+
+logger = logging.get_logger(__name__)
+
+type_ = type # keep python's type function
+
+
+class SchemaInferenceError(ValueError):
+ pass
+
+
+class TypedSequence:
+ """
+ This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
+
+ More specifically it adds several features:
+ - Support extension types like ``datasets.features.Array2DExtensionType``:
+ By default pyarrow arrays don't return extension arrays. One has to call
+ ``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
+ in order to get an extension array.
+ - Support for ``try_type`` parameter that can be used instead of ``type``:
+ When an array is transformed, we like to keep the same type as before if possible.
+ For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
+ of each column by default.
+ - Better error message when a pyarrow array overflows.
+
+ Example::
+
+ from datasets.features import Array2D, Array2DExtensionType, Value
+ from datasets.arrow_writer import TypedSequence
+ import pyarrow as pa
+
+ arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
+ assert arr.type == pa.string()
+
+ arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
+ assert arr.type == Array2DExtensionType((1, 3), "int64")
+
+ table = pa.Table.from_pydict({
+ "image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
+ })
+ assert table["image"].type == Array2DExtensionType((1, 3), "int64")
+
+ """
+
+ def __init__(
+ self,
+ data: Iterable,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ # assert type is None or try_type is None,
+ if type is not None and try_type is not None:
+ raise ValueError("You cannot specify both type and try_type")
+ # set attributes
+ self.data = data
+ self.type = type
+ self.try_type = try_type # is ignored if it doesn't match the data
+ self.optimized_int_type = optimized_int_type
+ # when trying a type (is ignored if data is not compatible)
+ self.trying_type = self.try_type is not None
+ self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
+ # used to get back the inferred type after __arrow_array__() is called once
+ self._inferred_type = None
+
+ def get_inferred_type(self) -> FeatureType:
+ """Return the inferred feature type.
+ This is done by converting the sequence to an Arrow array, and getting the corresponding
+ feature type.
+
+ Since building the Arrow array can be expensive, the value of the inferred type is cached
+ as soon as pa.array is called on the typed sequence.
+
+ Returns:
+ FeatureType: inferred feature type of the sequence.
+ """
+ if self._inferred_type is None:
+ self._inferred_type = generate_from_arrow_type(pa.array(self).type)
+ return self._inferred_type
+
+ @staticmethod
+ def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
+ """Implement type inference for custom objects like PIL.Image.Image -> Image type.
+
+ This function is only used for custom python objects that can't be directly passed to build
+ an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
+ that it can be passed to an Arrow array.
+
+ Args:
+ data (Iterable): array of data to infer the type, e.g. a list of PIL images.
+
+ Returns:
+ Tuple[Iterable, Optional[FeatureType]]: a tuple with:
+ - the (possibly encoded) array, if the inferred feature type requires encoding
+ - the inferred feature type if the array is made of supported custom objects like
+ PIL images, else None.
+ """
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
+ import PIL.Image
+
+ non_null_idx, non_null_value = first_non_null_value(data)
+ if isinstance(non_null_value, PIL.Image.Image):
+ return [Image().encode_example(value) if value is not None else None for value in data], Image()
+ return data, None
+
+ def __arrow_array__(self, type: Optional[pa.DataType] = None):
+ """This function is called when calling pa.array(typed_sequence)"""
+
+ if type is not None:
+ raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
+ del type # make sure we don't use it
+ data = self.data
+ # automatic type inference for custom objects
+ if self.type is None and self.try_type is None:
+ data, self._inferred_type = self._infer_custom_type_and_encode(data)
+ if self._inferred_type is None:
+ type = self.try_type if self.trying_type else self.type
+ else:
+ type = self._inferred_type
+ pa_type = get_nested_type(type) if type is not None else None
+ optimized_int_pa_type = (
+ get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
+ )
+ trying_cast_to_python_objects = False
+ try:
+ # custom pyarrow types
+ if isinstance(pa_type, _ArrayXDExtensionType):
+ storage = to_pyarrow_listarray(data, pa_type)
+ return pa.ExtensionArray.from_storage(pa_type, storage)
+
+ # efficient np array to pyarrow array
+ if isinstance(data, np.ndarray):
+ out = numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
+ out = list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ # use smaller integer precisions if possible
+ if self.trying_int_optimization:
+ if pa.types.is_int64(out.type):
+ out = out.cast(optimized_int_pa_type)
+ elif pa.types.is_list(out.type):
+ if pa.types.is_int64(out.type.value_type):
+ out = array_cast(out, pa.list_(optimized_int_pa_type))
+ elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
+ out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
+ # otherwise we can finally use the user's type
+ elif type is not None:
+ # We use cast_array_to_feature to support casting to custom types like Audio and Image
+ # Also, when trying type "string", we don't want to convert integers or floats to "string".
+ # We only do it if trying_type is False - since this is what the user asks for.
+ out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type)
+ return out
+ except (
+ TypeError,
+ pa.lib.ArrowInvalid,
+ pa.lib.ArrowNotImplementedError,
+ ) as e: # handle type errors and overflows
+ # Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
+ if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
+ raise
+
+ if self.trying_type:
+ try: # second chance
+ if isinstance(data, np.ndarray):
+ return numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
+ return list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ except pa.lib.ArrowInvalid as e:
+ if "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(
+ f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
+ )
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(
+ cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
+ )
+ if type is not None:
+ out = cast_array_to_feature(out, type, allow_number_to_str=True)
+ return out
+ else:
+ raise
+ elif "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
+ if type is not None:
+ out = cast_array_to_feature(out, type, allow_number_to_str=True)
+ return out
+ else:
+ raise
+
+
+class OptimizedTypedSequence(TypedSequence):
+ def __init__(
+ self,
+ data,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ col: Optional[str] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ optimized_int_type_by_col = {
+ "attention_mask": Value("int8"), # binary tensor
+ "special_tokens_mask": Value("int8"),
+ "input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
+ "token_type_ids": Value(
+ "int8"
+ ), # binary mask; some (XLNetModel) use an additional token represented by a 2
+ }
+ if type is None and try_type is None:
+ optimized_int_type = optimized_int_type_by_col.get(col, None)
+ super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
+
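+# A small sketch of the per-column integer optimization above (pyarrow is imported as `pa`
+# at the top of this module; the column name comes from the mapping above):
+#
+#     arr = pa.array(OptimizedTypedSequence([[0, 1, 1, 0]], col="attention_mask"))
+#     arr.type   # -> list<item: int8> instead of the default list<item: int64>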
+
+class ArrowWriter:
+ """Shuffles and writes Examples to Arrow files."""
+
+ _WRITER_CLASS = pa.RecordBatchStreamWriter
+
+ def __init__(
+ self,
+ schema: Optional[pa.Schema] = None,
+ features: Optional[Features] = None,
+ path: Optional[str] = None,
+ stream: Optional[pa.NativeFile] = None,
+ fingerprint: Optional[str] = None,
+ writer_batch_size: Optional[int] = None,
+ hash_salt: Optional[str] = None,
+ check_duplicates: Optional[bool] = False,
+ disable_nullable: bool = False,
+ update_features: bool = False,
+ with_metadata: bool = True,
+ unit: str = "examples",
+ embed_local_files: bool = False,
+ storage_options: Optional[dict] = None,
+ ):
+ if path is None and stream is None:
+ raise ValueError("At least one of path and stream must be provided.")
+ if features is not None:
+ self._features = features
+ self._schema = None
+ elif schema is not None:
+ self._schema: pa.Schema = schema
+ self._features = Features.from_arrow_schema(self._schema)
+ else:
+ self._features = None
+ self._schema = None
+
+ if hash_salt is not None:
+ # Create KeyHasher instance using split name as hash salt
+ self._hasher = KeyHasher(hash_salt)
+ else:
+ self._hasher = KeyHasher("")
+
+ self._check_duplicates = check_duplicates
+ self._disable_nullable = disable_nullable
+
+ if stream is None:
+ fs_token_paths = fsspec.get_fs_token_paths(path, storage_options=storage_options)
+ self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
+ self._path = (
+ fs_token_paths[2][0]
+ if not is_remote_filesystem(self._fs)
+ else self._fs.unstrip_protocol(fs_token_paths[2][0])
+ )
+ self.stream = self._fs.open(fs_token_paths[2][0], "wb")
+ self._closable_stream = True
+ else:
+ self._fs = None
+ self._path = None
+ self.stream = stream
+ self._closable_stream = False
+
+ self.fingerprint = fingerprint
+ self.disable_nullable = disable_nullable
+ self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
+ self.update_features = update_features
+ self.with_metadata = with_metadata
+ self.unit = unit
+ self.embed_local_files = embed_local_files
+
+ self._num_examples = 0
+ self._num_bytes = 0
+ self.current_examples: List[Tuple[Dict[str, Any], str]] = []
+ self.current_rows: List[pa.Table] = []
+ self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
+ self.hkey_record = []
+
+ def __len__(self):
+ """Return the number of writed and staged examples"""
+ return self._num_examples + len(self.current_examples) + len(self.current_rows)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def close(self):
+ # Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
+ if self.pa_writer: # it might be None
+ try:
+ self.pa_writer.close()
+ except Exception: # pyarrow.lib.ArrowInvalid, OSError
+ pass
+ if self._closable_stream and not self.stream.closed:
+ self.stream.close() # This also closes self.pa_writer if it is opened
+
+ def _build_writer(self, inferred_schema: pa.Schema):
+ schema = self.schema
+ inferred_features = Features.from_arrow_schema(inferred_schema)
+ if self._features is not None:
+ if self.update_features: # keep original features if they match, or update them
+ fields = {field.name: field for field in self._features.type}
+ for inferred_field in inferred_features.type:
+ name = inferred_field.name
+ if name in fields:
+ if inferred_field == fields[name]:
+ inferred_features[name] = self._features[name]
+ self._features = inferred_features
+ schema: pa.Schema = inferred_schema
+ else:
+ self._features = inferred_features
+ schema: pa.Schema = inferred_features.arrow_schema
+ if self.disable_nullable:
+ schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
+ if self.with_metadata:
+ schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
+ else:
+ schema = schema.with_metadata({})
+ self._schema = schema
+ self.pa_writer = self._WRITER_CLASS(self.stream, schema)
+
+ @property
+ def schema(self):
+ _schema = (
+ self._schema
+ if self._schema is not None
+ else (pa.schema(self._features.type) if self._features is not None else None)
+ )
+ if self._disable_nullable and _schema is not None:
+ _schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
+ return _schema if _schema is not None else []
+
+ @staticmethod
+ def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
+ info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
+ info_as_dict = asdict(info)
+ metadata = {}
+ metadata["info"] = {key: info_as_dict[key] for key in info_keys}
+ if fingerprint is not None:
+ metadata["fingerprint"] = fingerprint
+ return {"huggingface": json.dumps(metadata)}
+
+ def write_examples_on_file(self):
+ """Write stored examples from the write-pool of examples. It makes a table out of the examples and write it."""
+ if not self.current_examples:
+ return
+ # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in examples_cols]
+ extra_cols = [col for col in examples_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(self.current_examples[0][0])
+ batch_examples = {}
+ for col in cols:
+ # We use row[0][col] since current_examples contains (example, key) tuples.
+ # Moreover, examples could be Arrow arrays of 1 element.
+ # This can happen in `.map()` when we want to re-write the same Arrow data
+ if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
+ arrays = [row[0][col] for row in self.current_examples]
+ arrays = [
+ chunk
+ for array in arrays
+ for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
+ ]
+ batch_examples[col] = pa.concat_arrays(arrays)
+ else:
+ batch_examples[col] = [
+ row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
+ for row in self.current_examples
+ ]
+ self.write_batch(batch_examples=batch_examples)
+ self.current_examples = []
+
+ def write_rows_on_file(self):
+ """Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
+ if not self.current_rows:
+ return
+ table = pa.concat_tables(self.current_rows)
+ self.write_table(table)
+ self.current_rows = []
+
+ def write(
+ self,
+ example: Dict[str, Any],
+ key: Optional[Union[str, int, bytes]] = None,
+ writer_batch_size: Optional[int] = None,
+ ):
+ """Add a given (Example,Key) pair to the write-pool of examples which is written to file.
+
+ Args:
+ example: the Example to add.
+ key: Optional, a unique identifier (str, int or bytes) associated with each example.
+ """
+ # Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
+ if self._check_duplicates:
+ # Create unique hash from key and store as (key, example) pairs
+ hash = self._hasher.hash(key)
+ self.current_examples.append((example, hash))
+ # Maintain record of keys and their respective hashes for checking duplicates
+ self.hkey_record.append((hash, key))
+ else:
+ # Store example as a tuple so as to keep the structure of `self.current_examples` uniform
+ self.current_examples.append((example, ""))
+
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+                # Re-initialize to an empty list for the next batch
+ self.hkey_record = []
+
+ self.write_examples_on_file()
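+    # Illustrative call sequence (hypothetical examples; assumes the writer was constructed with
+    # duplicate checking enabled so that `key` is hashed and recorded):
+    #
+    #     writer.write({"id": 0, "text": "first"}, key=0)
+    #     writer.write({"id": 1, "text": "second"}, key=1)
+    #     # once `writer_batch_size` examples are buffered, keys are checked for duplicates and the
+    #     # batch is flushed to the Arrow file via write_examples_on_file()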
+
+ def check_duplicate_keys(self):
+        """Raise a `DuplicatedKeysError` if duplicate keys are found in the current batch."""
+ tmp_record = set()
+ for hash, key in self.hkey_record:
+ if hash in tmp_record:
+ duplicate_key_indices = [
+ str(self._num_examples + index)
+ for index, (duplicate_hash, _) in enumerate(self.hkey_record)
+ if duplicate_hash == hash
+ ]
+
+ raise DuplicatedKeysError(key, duplicate_key_indices)
+ else:
+ tmp_record.add(hash)
+
+ def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
+ """Add a given single-row Table to the write-pool of rows which is written to file.
+
+ Args:
+ row: the row to add.
+ """
+ if len(row) != 1:
+ raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
+ self.current_rows.append(row)
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
+ self.write_rows_on_file()
+
+ def write_batch(
+ self,
+ batch_examples: Dict[str, List],
+ writer_batch_size: Optional[int] = None,
+ ):
+        """Write a batch of examples to file.
+ Ignores the batch if it appears to be empty,
+ preventing a potential schema update of unknown types.
+
+ Args:
+ batch_examples: the batch of examples to add.
+ """
+ if batch_examples and len(next(iter(batch_examples.values()))) == 0:
+ return
+ features = None if self.pa_writer is None and self.update_features else self._features
+ try_features = self._features if self.pa_writer is None and self.update_features else None
+ arrays = []
+ inferred_features = Features()
+        # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in batch_cols]
+ extra_cols = [col for col in batch_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(batch_examples)
+ for col in cols:
+ col_values = batch_examples[col]
+ col_type = features[col] if features else None
+ if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
+ array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
+ arrays.append(array)
+ inferred_features[col] = generate_from_arrow_type(col_values.type)
+ else:
+ col_try_type = try_features[col] if try_features is not None and col in try_features else None
+ typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
+ arrays.append(pa.array(typed_sequence))
+ inferred_features[col] = typed_sequence.get_inferred_type()
+ schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
+ pa_table = pa.Table.from_arrays(arrays, schema=schema)
+ self.write_table(pa_table, writer_batch_size)
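+    # Note on the expected layout (illustrative, assumed column names): `write_batch` takes a
+    # columnar mapping such as {"text": ["a", "b"], "label": [0, 1]} (or pyarrow arrays as values),
+    # not a list of per-row dicts; per-example writing goes through `write` instead, and single-row
+    # tables through `write_row`.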
+
+ def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
+ """Write a Table to file.
+
+ Args:
+            pa_table: the Table to add.
+ """
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if self.pa_writer is None:
+ self._build_writer(inferred_schema=pa_table.schema)
+ pa_table = pa_table.combine_chunks()
+ pa_table = table_cast(pa_table, self._schema)
+ if self.embed_local_files:
+ pa_table = embed_table_storage(pa_table)
+ self._num_bytes += pa_table.nbytes
+ self._num_examples += pa_table.num_rows
+ self.pa_writer.write_table(pa_table, writer_batch_size)
+
+ def finalize(self, close_stream=True):
+ self.write_rows_on_file()
+        # In case fewer than writer_batch_size examples are buffered but the user calls finalize()
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+            # Re-initialize to an empty list for the next batch
+ self.hkey_record = []
+ self.write_examples_on_file()
+ # If schema is known, infer features even if no examples were written
+ if self.pa_writer is None and self.schema:
+ self._build_writer(self.schema)
+ if self.pa_writer is not None:
+ self.pa_writer.close()
+ self.pa_writer = None
+ if close_stream:
+ self.stream.close()
+ else:
+ if close_stream:
+ self.stream.close()
+ raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
+ logger.debug(
+ f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
+ )
+ return self._num_examples, self._num_bytes
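+    # End-to-end usage sketch (illustrative; mirrors how `parquet_to_arrow` below drives the writer,
+    # the file name and features are assumptions, duplicate checking disabled):
+    #
+    #     from datasets import Features, Value
+    #
+    #     with ArrowWriter(features=Features({"text": Value("string")}), path="data.arrow") as writer:
+    #         writer.write({"text": "hello"})
+    #         writer.write({"text": "world"})
+    #         num_examples, num_bytes = writer.finalize()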
+
+
+class ParquetWriter(ArrowWriter):
+ _WRITER_CLASS = pq.ParquetWriter
+
+
+class BeamWriter:
+ """
+ Shuffles and writes Examples to Arrow files.
+ The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
+ """
+
+ def __init__(
+ self,
+ features: Optional[Features] = None,
+ schema: Optional[pa.Schema] = None,
+ path: Optional[str] = None,
+ namespace: Optional[str] = None,
+ cache_dir: Optional[str] = None,
+ ):
+ if features is None and schema is None:
+ raise ValueError("At least one of features and schema must be provided.")
+ if path is None:
+ raise ValueError("Path must be provided.")
+
+ if features is not None:
+ self._features: Features = features
+ self._schema: pa.Schema = features.arrow_schema
+ else:
+ self._schema: pa.Schema = schema
+ self._features: Features = Features.from_arrow_schema(schema)
+
+ self._path = path
+ self._parquet_path = os.path.splitext(path)[0] # remove extension
+ self._namespace = namespace or "default"
+ self._num_examples = None
+ self._cache_dir = cache_dir or config.HF_DATASETS_CACHE
+
+ def write_from_pcollection(self, pcoll_examples):
+ """Add the final steps of the beam pipeline: write to parquet files."""
+ import apache_beam as beam
+
+ def inc_num_examples(example):
+ beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
+
+ # count examples
+ _ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
+
+ # save dataset
+ return (
+ pcoll_examples
+ | "Get values" >> beam.Values()
+ | "Save to parquet"
+ >> beam.io.parquetio.WriteToParquet(
+ self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet"
+ )
+ )
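+    # With the shard_name_template above, Beam produces files named like
+    # f"{self._parquet_path}-00000-of-00010.parquet" (the shard index and count shown here are
+    # illustrative); `finalize` later picks them up by globbing parquet_path + "*.parquet".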
+
+ def finalize(self, metrics_query_result: dict):
+ """
+ Run after the pipeline has finished.
+ It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics.
+
+ Args:
+ metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
+ that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
+ """
+
+ # Beam FileSystems require the system's path separator in the older versions
+ fs, _, [parquet_path] = fsspec.get_fs_token_paths(self._parquet_path)
+ parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path)
+
+ shards = fs.glob(parquet_path + "*.parquet")
+ num_bytes = sum(fs.sizes(shards))
+ shard_lengths = get_parquet_lengths(shards)
+
+ # Convert to arrow
+ if self._path.endswith(".arrow"):
+ logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
+ try: # stream conversion
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ with fs.open(shard, "rb") as source:
+ with fs.open(shard.replace(".parquet", ".arrow"), "wb") as destination:
+ shard_num_bytes, _ = parquet_to_arrow(source, destination)
+ num_bytes += shard_num_bytes
+ except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
+ if e.errno != errno.EPIPE: # not a broken pipe
+ raise
+ logger.warning(
+                    "Broken Pipe during stream conversion from parquet to arrow. Using local conversion instead"
+ )
+ local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
+ os.makedirs(local_convert_dir, exist_ok=True)
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
+ fs.download(shard, local_parquet_path)
+ local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
+ shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
+ num_bytes += shard_num_bytes
+ remote_arrow_path = shard.replace(".parquet", ".arrow")
+ fs.upload(local_arrow_path, remote_arrow_path)
+
+ # Save metrics
+ counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
+ self._num_examples = counters_dict["num_examples"]
+ self._num_bytes = num_bytes
+ self._shard_lengths = shard_lengths
+ return self._num_examples, self._num_bytes
+
+
+def get_parquet_lengths(sources) -> List[int]:
+ shard_lengths = []
+ for source in hf_tqdm(sources, unit="parquet files"):
+ parquet_file = pa.parquet.ParquetFile(source)
+ shard_lengths.append(parquet_file.metadata.num_rows)
+ return shard_lengths
+
+
+def parquet_to_arrow(source, destination) -> List[int]:
+    """Convert a parquet file to an arrow file. Inputs can be str paths or file-like objects. Returns a (num_bytes, num_examples) tuple."""
+ stream = None if isinstance(destination, str) else destination
+ parquet_file = pa.parquet.ParquetFile(source)
+ # Beam can create empty Parquet files, so we need to pass the source Parquet file's schema
+ with ArrowWriter(schema=parquet_file.schema_arrow, path=destination, stream=stream) as writer:
+ for record_batch in parquet_file.iter_batches():
+ pa_table = pa.Table.from_batches([record_batch])
+ writer.write_table(pa_table)
+        num_examples, num_bytes = writer.finalize()
+ return num_bytes, num_examples
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/builder.py b/env-llmeval/lib/python3.10/site-packages/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f4bc1962b60e9080542e29ed402212865cb3b1e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/builder.py
@@ -0,0 +1,2270 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, Union
+from unittest.mock import patch
+
+import fsspec
+import pyarrow as pa
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config, utils
+from .arrow_dataset import Dataset
+from .arrow_reader import (
+ HF_GCP_BASE_URL,
+ ArrowReader,
+ DatasetNotOnHfGcsError,
+ MissingFilesOnHfGcsError,
+ ReadInstruction,
+)
+from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
+from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager, DownloadMode
+from .download.mock_download_manager import MockDownloadManager
+from .download.streaming_download_manager import StreamingDownloadManager, xjoin, xopen
+from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError
+from .features import Features
+from .filesystems import (
+ is_remote_filesystem,
+ rename,
+)
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
+from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
+from .keyhash import DuplicatedKeysError
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
+from .splits import Split, SplitDict, SplitGenerator, SplitInfo
+from .streaming import extend_dataset_builder_for_streaming
+from .table import CastError
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils._filelock import FileLock
+from .utils.file_utils import cached_path, is_remote_url
+from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
+from .utils.py_utils import (
+ classproperty,
+ convert_file_size_to_int,
+ has_sufficient_disk_space,
+ iflatmap_unordered,
+ map_nested,
+ memoize,
+ size_str,
+ temporary_assignment,
+)
+from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
+from .utils.track import tracked_list
+
+
+if TYPE_CHECKING:
+ from .load import DatasetModule
+
+
+logger = logging.get_logger(__name__)
+
+
+class InvalidConfigName(ValueError):
+ pass
+
+
+@dataclass
+class BuilderConfig:
+ """Base class for `DatasetBuilder` data configuration.
+
+ `DatasetBuilder` subclasses with data configuration options should subclass
+ `BuilderConfig` and add their own properties.
+
+ Attributes:
+ name (`str`, defaults to `default`):
+ The name of the configuration.
+ version (`Version` or `str`, defaults to `0.0.0`):
+ The version of the configuration.
+ data_dir (`str`, *optional*):
+ Path to the directory containing the source data.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ description (`str`, *optional*):
+ A human description of the configuration.
+ """
+
+ name: str = "default"
+ version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
+ data_dir: Optional[str] = None
+ data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
+ description: Optional[str] = None
+
+ def __post_init__(self):
+ # The config name is used to name the cache directory.
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in self.name:
+ raise InvalidConfigName(
+                    f"Bad characters from blacklist '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
+                    f"They could create issues when creating a directory for this config on a Windows filesystem."
+ )
+ if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
+ raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+ def __eq__(self, o):
+        # we need to override the default dataclass __eq__ since it doesn't check for
+        # other attributes than the ones in the signature.
+ if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+ return False
+ return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[Features] = None,
+ ) -> str:
+ """
+ The config id is used to build the cache directory.
+ By default it is equal to the config name.
+ However the name of a config is not sufficient to have a unique identifier for the dataset being generated
+ since it doesn't take into account:
+ - the config kwargs that can be used to overwrite attributes
+ - the custom features used to write the dataset
+ - the data_files for json/text/csv/pandas datasets
+
+ Therefore the config id is just the config name with an optional suffix based on these.
+ """
+ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+ suffix: Optional[str] = None
+ config_kwargs_to_add_to_suffix = config_kwargs.copy()
+ # name and version are already used to build the cache directory
+ config_kwargs_to_add_to_suffix.pop("name", None)
+ config_kwargs_to_add_to_suffix.pop("version", None)
+ # data dir handling (when specified it points to the manually downloaded data):
+ # it was previously ignored before the introduction of config id because we didn't want
+ # to change the config name. Now it's fine to take it into account for the config id.
+ # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if "data_dir" in config_kwargs_to_add_to_suffix:
+ if config_kwargs_to_add_to_suffix["data_dir"] is None:
+ config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ else:
+ # canonicalize the data dir to avoid two paths to the same location having different
+ # hashes
+ data_dir = config_kwargs_to_add_to_suffix["data_dir"]
+ data_dir = os.path.normpath(data_dir)
+ config_kwargs_to_add_to_suffix["data_dir"] = data_dir
+ if config_kwargs_to_add_to_suffix:
+ # we don't care about the order of the kwargs
+ config_kwargs_to_add_to_suffix = {
+ k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+ }
+ if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+ suffix = ",".join(
+ str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+ )
+ if len(suffix) > 32: # hash if too long
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+ else:
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+ if custom_features is not None:
+ m = Hasher()
+ if suffix:
+ m.update(suffix)
+ m.update(custom_features)
+ suffix = m.hexdigest()
+
+ if suffix:
+ config_id = self.name + "-" + suffix
+ if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+ config_id = self.name + "-" + Hasher.hash(suffix)
+ return config_id
+ else:
+ return self.name
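+    # Illustrative outcome (hypothetical kwargs): for a config named "default" built with
+    # config_kwargs={"data_dir": "./my_data"}, the data_dir is normalized to "my_data" and the
+    # config id becomes "default-data_dir=my_data"; if the serialized kwargs exceed 32 characters
+    # or contain non-scalar values, the suffix falls back to a hash, e.g. "default-<hash>".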
+
+ def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
+ if isinstance(self.data_files, DataFilesPatternsDict):
+ base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
+ self.data_files = self.data_files.resolve(base_path, download_config)
+
+
+class DatasetBuilder:
+ """Abstract base class for all datasets.
+
+ `DatasetBuilder` has 3 key methods:
+
+ - [`DatasetBuilder.info`]: Documents the dataset, including feature
+ names, types, shapes, version, splits, citation, etc.
+ - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+ and writes it to disk.
+ - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+ Some `DatasetBuilder`s expose multiple variants of the
+ dataset by defining a [`BuilderConfig`] subclass and accepting a
+ config object (or name) on construction. Configurable datasets expose a
+ pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+ Args:
+ cache_dir (`str`, *optional*):
+ Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+ dataset_name (`str`, *optional*):
+ Name of the dataset, if different from the builder name. Useful for packaged builders
+ like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
+ that use the same packaged builder.
+ config_name (`str`, *optional*):
+ Name of the dataset configuration.
+ It affects the data generated on disk. Different configurations will have their own subdirectories and
+ versions.
+ If not provided, the default configuration is used (if it exists).
+
+
+
+ Parameter `name` was renamed to `config_name`.
+
+
+ hash (`str`, *optional*):
+ Hash specific to the dataset code. Used to update the caching directory when the
+ dataset loading script code is updated (to avoid reusing old data).
+ The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files.
+ This can be a remote URL.
+ features ([`Features`], *optional*):
+ Features types to use with this dataset.
+ It can be used to change the [`Features`] types of a dataset, for example.
+ token (`str` or `bool`, *optional*):
+ String or boolean to use as Bearer token for remote files on the
+ Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+ repo_id (`str`, *optional*):
+ ID of the dataset repository.
+ Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+ and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ For builders like "csv" or "json" that need the user to specify data files. They can be either
+ local or remote files. For convenience, you can use a `DataFilesDict`.
+ data_dir (`str`, *optional*):
+ Path to directory containing source data file(s).
+ Use only if `data_files` is not passed, in which case it is equivalent to passing
+ `os.path.join(data_dir, "**")` as `data_files`.
+ For builders that require manual download, it must be the path to the local directory containing the
+ manually downloaded data.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ writer_batch_size (`int`, *optional*):
+ Batch size used by the ArrowWriter.
+ It defines the number of samples that are kept in memory before writing them
+ and also the length of the arrow chunks.
+ None means that the ArrowWriter will use its default value.
+ name (`str`): Configuration name for the dataset.
+
+
+
+ Use `config_name` instead.
+
+
+
+ **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+ configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+ configuration class is [`BuilderConfig`] or a subclass of it.
+ """
+
+ # Default version
+ VERSION = None # Default version set in BuilderConfig
+
+ # Class for the builder config.
+ BUILDER_CONFIG_CLASS = BuilderConfig
+
+ # Named configurations that modify the data generated by download_and_prepare.
+ BUILDER_CONFIGS = []
+
+ # Optional default config name to be used when name is None
+ DEFAULT_CONFIG_NAME = None
+
+ # Default batch size used by the ArrowWriter
+ # It defines the number of samples that are kept in memory before writing them
+ # and also the length of the arrow chunks
+ # None means that the ArrowWriter will use its default value
+ DEFAULT_WRITER_BATCH_SIZE = None
+
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[DatasetInfo] = None,
+ features: Optional[Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ # DatasetBuilder name
+ self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
+ self.hash: Optional[str] = hash
+ self.base_path = base_path
+ self.token = token
+ # For backwards compatibility (e.g. if accessed in a dataset script)
+ self.use_auth_token = token
+ self.repo_id = repo_id
+ self.storage_options = storage_options or {}
+ self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
+ self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+ if data_files is not None and not isinstance(data_files, DataFilesDict):
+ data_files = DataFilesDict.from_patterns(
+ sanitize_patterns(data_files),
+ base_path=base_path,
+ download_config=DownloadConfig(token=token, storage_options=self.storage_options),
+ )
+
+        # Prepare config: BuilderConfig contains name, version and description but can be extended by each dataset
+ if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+ config_kwargs["features"] = features
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ self.config, self.config_id = self._create_builder_config(
+ config_name=config_name,
+ custom_features=features,
+ **config_kwargs,
+ )
+
+        # Prepare info: DatasetInfo is a standardized dataclass shared across all datasets
+        # Prefill DatasetInfo
+ if info is None:
+ # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
+ info = self.get_exported_dataset_info()
+ info.update(self._info())
+ info.builder_name = self.name
+ info.dataset_name = self.dataset_name
+ info.config_name = self.config.name
+ info.version = self.config.version
+ self.info = info
+ # update info with user specified infos
+ if features is not None:
+ self.info.features = features
+
+ # Prepare data dirs:
+ # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+ self._cache_dir_root = (
+ self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+ )
+ self._cache_downloaded_dir = (
+ posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+ if cache_dir
+ else str(config.DOWNLOADED_DATASETS_PATH)
+ )
+ self._cache_downloaded_dir = (
+ self._cache_downloaded_dir
+ if is_remote_url(self._cache_downloaded_dir)
+ else os.path.expanduser(self._cache_downloaded_dir)
+ )
+
+ # In case there exists a legacy cache directory
+ self._legacy_relative_data_dir = None
+
+ self._cache_dir = self._build_cache_dir()
+ if not is_remote_url(self._cache_dir_root):
+ os.makedirs(self._cache_dir_root, exist_ok=True)
+ lock_path = os.path.join(
+ self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
+ )
+ with FileLock(lock_path):
+ if os.path.exists(self._cache_dir): # check if data exist
+ if len(os.listdir(self._cache_dir)) > 0:
+ if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+ logger.info("Overwrite dataset info from restored data version if exists.")
+ self.info = DatasetInfo.from_directory(self._cache_dir)
+ else: # dir exists but no data, remove the empty dir as data aren't available anymore
+ logger.warning(
+ f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
+ )
+ os.rmdir(self._cache_dir)
+
+ # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
+ self._output_dir = self._cache_dir
+ self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
+
+ # Set download manager
+ self.dl_manager = None
+
+ # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
+ self._record_infos = False
+
+ # Set in `.download_and_prepare` once the format of the generated dataset is known
+ self._file_format = None
+
+ # Enable streaming (e.g. it patches "open" to work with remote files)
+ extend_dataset_builder_for_streaming(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-enable streaming, since patched functions are not kept when pickling
+ extend_dataset_builder_for_streaming(self)
+
+ # Must be set for datasets that use 'data_dir' functionality - the ones
+ # that require users to do additional steps to download the data
+ # (this is usually due to some external regulations / rules).
+ # This field should contain a string with user instructions, including
+ # the list of files that should be present. It will be
+ # displayed in the dataset documentation.
+ @property
+ def manual_download_instructions(self) -> Optional[str]:
+ return None
+
+ def _check_legacy_cache(self) -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and self.config.name == "default"
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
+ config_id = config_name + self.config_id[len(self.config.name) :]
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
+ if self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+ from .utils._dill import Pickler
+
+ def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
+ """
+ Used to update hash of packaged modules which is used for creating unique cache directories to reflect
+ different config parameters which are passed in metadata from readme.
+ """
+ params_to_exclude = {"config_name", "version", "description"}
+ params_to_add_to_hash = {
+ param: value
+ for param, value in sorted(config_parameters.items())
+ if param not in params_to_exclude
+ }
+ m = Hasher()
+ m.update(hash)
+ m.update(params_to_add_to_hash)
+ return m.hexdigest()
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
+ config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ if (
+ dataset_module.builder_configs_parameters.metadata_configs
+ and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
+ ):
+ hash = update_hash_with_config_parameters(
+ hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
+ )
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ @classmethod
+ def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
+        """The exported dataset infos for all configurations of this builder class; an empty dict if they don't exist.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_all_exported_dataset_infos()
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
+ ```
+ """
+ return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
+
+ def get_exported_dataset_info(self) -> DatasetInfo:
+        """The exported `DatasetInfo` for this builder's config; an empty `DatasetInfo` if it doesn't exist.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_exported_dataset_info()
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
+ ```
+ """
+ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
+
+ def _create_builder_config(
+ self, config_name=None, custom_features=None, **config_kwargs
+ ) -> Tuple[BuilderConfig, str]:
+        """Create and validate a BuilderConfig object, as well as a unique config id for this config.
+        Raises a ValueError if there are multiple builder configs and both config_name and DEFAULT_CONFIG_NAME are None.
+        config_kwargs override the default kwargs in the config.
+ """
+ builder_config = None
+
+ # try default config
+ if config_name is None and self.BUILDER_CONFIGS:
+ if self.DEFAULT_CONFIG_NAME is not None:
+ builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
+ logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
+ else:
+ if len(self.BUILDER_CONFIGS) > 1:
+ if not config_kwargs:
+ example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
+ raise ValueError(
+ "Config name is missing."
+ f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ + f"\nExample of usage:\n\t`{example_of_usage}`"
+ )
+ else:
+ builder_config = self.BUILDER_CONFIGS[0]
+ logger.info(
+ f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
+ )
+
+ # try to get config by name
+ if isinstance(config_name, str):
+ builder_config = self.builder_configs.get(config_name)
+ if builder_config is None and self.BUILDER_CONFIGS:
+ raise ValueError(
+ f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
+ )
+
+ # if not using an existing config, then create a new config on the fly
+ if not builder_config:
+ if config_name is not None:
+ config_kwargs["name"] = config_name
+ elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+ # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+ config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+ if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+ config_kwargs["version"] = self.VERSION
+ builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+ # otherwise use the config_kwargs to overwrite the attributes
+ else:
+ builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
+ for key, value in config_kwargs.items():
+ if value is not None:
+ if not hasattr(builder_config, key):
+ raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+ setattr(builder_config, key, value)
+
+ if not builder_config.name:
+ raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+ # resolve data files if needed
+ builder_config._resolve_data_files(
+ base_path=self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ )
+
+ # compute the config id that is going to be used for caching
+ config_id = builder_config.create_config_id(
+ config_kwargs,
+ custom_features=custom_features,
+ )
+ is_custom = (config_id not in self.builder_configs) and config_id != "default"
+ if is_custom:
+ logger.info(f"Using custom data configuration {config_id}")
+ else:
+ if (
+ builder_config.name in self.builder_configs
+ and builder_config != self.builder_configs[builder_config.name]
+ ):
+ raise ValueError(
+ "Cannot name a custom BuilderConfig the same as an available "
+ f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+ )
+ if not builder_config.version:
+ raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+
+ return builder_config, config_id
+
+ @classproperty
+ @classmethod
+ @memoize()
+ def builder_configs(cls) -> Dict[str, BuilderConfig]:
+ """Dictionary of pre-defined configurations for this builder class."""
+ configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+ if len(configs) != len(cls.BUILDER_CONFIGS):
+ names = [config.name for config in cls.BUILDER_CONFIGS]
+ raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+ return configs
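+    # For example (illustrative builder, not from the original source):
+    #
+    #     class MyDataset(GeneratorBasedBuilder):
+    #         BUILDER_CONFIGS = [BuilderConfig(name="en"), BuilderConfig(name="fr")]
+    #
+    #     MyDataset.builder_configs  # -> {"en": BuilderConfig(name="en", ...), "fr": BuilderConfig(name="fr", ...)}
+    #
+    # Duplicate config names raise the ValueError above.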
+
+ @property
+ def cache_dir(self):
+ return self._cache_dir
+
+ def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
+ # Check for the legacy cache directory template (datasets<3.0.0)
+ self._legacy_relative_data_dir = (
+ self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
+ )
+ self._cache_dir = self._build_cache_dir()
+ self._output_dir = self._cache_dir
+
+ def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
+        """Relative path of this dataset in cache_dir:
+        Will be:
+            self.dataset_name/self.config_id/self.config.version/self.hash/
+        or, if a repo_id with a namespace has been specified:
+            self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
+        If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ if self._legacy_relative_data_dir is not None and with_version and with_hash:
+ return self._legacy_relative_data_dir
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
+ builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
+ if with_version:
+ builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
+ if with_hash and self.hash and isinstance(self.hash, str):
+ builder_data_dir = posixpath.join(builder_data_dir, self.hash)
+ return builder_data_dir
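+    # Concrete shapes (hypothetical names and hashes): "squad/plain_text/1.0.0/abc123" for a dataset
+    # without a namespace, or "lhoestq___squad/plain_text/1.0.0/abc123" when the repo_id carries a
+    # namespace.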
+
+ def _build_cache_dir(self):
+ """Return the data directory for the current version."""
+ builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
+ version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
+
+ def _other_versions_on_disk():
+ """Returns previous versions on disk."""
+ if not os.path.exists(builder_data_dir):
+ return []
+
+ version_dirnames = []
+ for dir_name in os.listdir(builder_data_dir):
+ try:
+ version_dirnames.append((utils.Version(dir_name), dir_name))
+ except ValueError: # Invalid version (ex: incomplete data dir)
+ pass
+ version_dirnames.sort(reverse=True)
+ return version_dirnames
+
+ # Check and warn if other versions exist
+ if not is_remote_url(builder_data_dir):
+ version_dirs = _other_versions_on_disk()
+ if version_dirs:
+ other_version = version_dirs[0][0]
+ if other_version != self.config.version:
+ warn_msg = (
+ f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
+ f"cache_dir {self._cache_dir_root}. Using currently defined version "
+ f"{str(self.config.version)}."
+ )
+ logger.warning(warn_msg)
+
+ return version_data_dir
+
+ @abc.abstractmethod
+ def _info(self) -> DatasetInfo:
+ """Construct the DatasetInfo object. See `DatasetInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (DatasetInfo) The dataset information
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_imported_module_dir(cls):
+ """Return the path of the module of this class or subclass."""
+ return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
+
+ def _rename(self, src: str, dst: str):
+ rename(self._fs, src, dst)
+
+ def download_and_prepare(
+ self,
+ output_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ try_from_hf_gcs: bool = True,
+ dl_manager: Optional[DownloadManager] = None,
+ base_path: Optional[str] = None,
+ use_auth_token="deprecated",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **download_and_prepare_kwargs,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ output_dir (`str`, *optional*):
+ Output directory for the dataset.
+ Default to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+
+
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+ Select the download/generate mode, default to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+ dl_manager (`DownloadManager`, *optional*):
+ Specific `DownloadManger` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote url.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from ~/.huggingface.
+
+
+
+ Pass `use_auth_token` to `load_dataset_builder` instead.
+
+
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Default to "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+
+
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+
+
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
+
+ Example:
+
+ Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare()
+ ```
+
+ Download and prepare the dataset as sharded Parquet files locally:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("./output_dir", file_format="parquet")
+ ```
+
+ Download and prepare the dataset as sharded Parquet files in a cloud storage:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ else:
+ token = self.token
+
+ output_dir = output_dir if output_dir is not None else self._cache_dir
+ # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ fs, _, [output_dir] = fsspec.get_fs_token_paths(output_dir, storage_options=storage_options)
+ self._fs = fs
+ self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+ base_path = base_path if base_path is not None else self.base_path
+
+ if file_format is not None and file_format not in ["arrow", "parquet"]:
+ raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
+ self._file_format = file_format
+
+ if self._fs._strip_protocol(self._output_dir) == "":
+ # We don't support the root directory, because it has no dirname,
+ # and we need a dirname to use a .incomplete directory
+ # when the dataset is being written
+ raise RuntimeError(
+ f"Unable to download and prepare the dataset at the root {self._output_dir}. "
+ f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
+ )
+
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig(
+ cache_dir=self._cache_downloaded_dir,
+ force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ use_etag=False,
+ num_proc=num_proc,
+ token=token,
+ storage_options=self.storage_options,
+ ) # We don't use etag for data files to speed up the process
+
+ dl_manager = DownloadManager(
+ dataset_name=self.dataset_name,
+ download_config=download_config,
+ data_dir=self.config.data_dir,
+ base_path=base_path,
+ record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
+ )
+
+ is_local = not is_remote_filesystem(self._fs)
+
+ if (
+ isinstance(dl_manager, MockDownloadManager)
+ or not is_local
+ or file_format != "arrow"
+ or max_shard_size is not None
+ ):
+ try_from_hf_gcs = False
+ self.dl_manager = dl_manager
+
+ # Prevent parallel local disk operations
+ if is_local:
+ # Create parent directory of the output_dir to put the lock file in there
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ lock_path = self._output_dir + "_builder.lock"
+
+ # File locking only with local paths; no file locking on GCS or S3
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ # Check if the data already exists
+ data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
+ if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
+ logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
+ # We need to update the info in case some splits were added in the meantime
+ # for example when calling load_dataset from multiple workers.
+ self.info = self._load_info()
+ self.download_post_processing_resources(dl_manager)
+ return
+
+ logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
+ if is_local: # if cache dir is local, check for available space
+ if not has_sufficient_disk_space(
+ self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
+ ):
+ raise OSError(
+ f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
+ )
+
+ @contextlib.contextmanager
+ def incomplete_dir(dirname):
+ """Create temporary dir for dirname and rename on exit."""
+ if not is_local:
+ self._fs.makedirs(dirname, exist_ok=True)
+ yield dirname
+ else:
+ tmp_dir = dirname + ".incomplete"
+ os.makedirs(tmp_dir, exist_ok=True)
+ try:
+ yield tmp_dir
+ if os.path.isdir(dirname):
+ shutil.rmtree(dirname)
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
+ shutil.move(tmp_dir, dirname)
+ finally:
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
+
+            # This log message is intentional: it gives the user the information needed to cancel
+            # the download/preparation if needed. It comes right before the progress bar.
+ if self.info.size_in_bytes:
+ logger.info(
+ f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
+ f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
+ f"post-processed: {size_str(self.info.post_processing_size)}, "
+ f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
+ )
+ else:
+ _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
+ logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
+
+ self._check_manual_download(dl_manager)
+
+ # Create a tmp dir and rename to self._output_dir on successful exit.
+ with incomplete_dir(self._output_dir) as tmp_output_dir:
+ # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward
+ # it to every sub function.
+ with temporary_assignment(self, "_output_dir", tmp_output_dir):
+ # Try to download the already prepared dataset files
+ downloaded_from_gcs = False
+ if try_from_hf_gcs:
+ try:
+ self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ downloaded_from_gcs = True
+ except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
+ logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
+ except ConnectionError:
+ logger.warning("HF google storage unreachable. Downloading and preparing it from source")
+ if not downloaded_from_gcs:
+ prepare_split_kwargs = {"file_format": file_format}
+ if max_shard_size is not None:
+ prepare_split_kwargs["max_shard_size"] = max_shard_size
+ if num_proc is not None:
+ prepare_split_kwargs["num_proc"] = num_proc
+ self._download_and_prepare(
+ dl_manager=dl_manager,
+ verification_mode=verification_mode,
+ **prepare_split_kwargs,
+ **download_and_prepare_kwargs,
+ )
+ # Sync info
+ self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
+ self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
+ self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
+ # Save info
+ self._save_info()
+
+ # Download post processing resources
+ self.download_post_processing_resources(dl_manager)
+
+ logger.info(
+ f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
+ f"Subsequent calls will reuse this data."
+ )
+
+ def _check_manual_download(self, dl_manager):
+ if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
+ raise ManualDownloadError(
+ textwrap.dedent(
+ f"""\
+ The dataset {self.dataset_name} with config {self.config.name} requires manual data.
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+ datasets.load_dataset("{self.dataset_name}", data_dir="")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+                    logger.info(f"Couldn't download resource file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+        This is the internal implementation, meant to be overridden, that is called when the user
+        calls `download_and_prepare`. It should download all required data and generate
+        the pre-processed dataset files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.dataset_name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+                    "`all` is a special split keyword corresponding to the "
+                    "union of all splits, so it cannot be used as a key in "
+                    "`_split_generators()`."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated to the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+            # If check_duplicates is set to True, a DuplicatedKeysError may be raised; re-raise it with a fix message
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+                if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ file_lock = (
+ FileLock(self._output_dir + "_info.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ file_lock = (
+ FileLock(self._output_dir + "_infos.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+            verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+                Verification mode determining the checks to run on the
+                downloaded/processed dataset information (checksums/size/splits/...).
+
+            ignore_verifications (`bool`, defaults to `False`):
+                Whether to ignore the verifications of the
+                downloaded/processed dataset information (checksums/size/splits/...).
+
+                `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+                Please use `verification_mode` instead.
+
+            in_memory (`bool`, defaults to `False`):
+                Whether to copy the data in-memory.
+
+ Returns:
+ datasets.Dataset
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+            verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+                "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if self._file_format is not None and self._file_format != "arrow":
+ raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
+ if self.info.post_processed is None:
+ self.info.post_processed = PostProcessedInfo()
+ if self.info.post_processed.resources_checksums is None:
+ self.info.post_processed.resources_checksums = {}
+ self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
+ self.info.post_processing_size = sum(
+ checksums_dict["num_bytes"]
+ for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
+ for checksums_dict in split_checksums_dicts.values()
+ )
+ if self.info.dataset_size is not None and self.info.download_size is not None:
+ self.info.size_in_bytes = (
+ self.info.dataset_size + self.info.download_size + self.info.post_processing_size
+ )
+ self._save_info()
+ ds._info.post_processed = self.info.post_processed
+ ds._info.post_processing_size = self.info.post_processing_size
+ ds._info.size_in_bytes = self.info.size_in_bytes
+ if self.info.post_processed.features is not None:
+ if self.info.post_processed.features.type != ds.features.type:
+ raise ValueError(
+ f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
+ )
+ else:
+ ds.info.features = self.info.post_processed.features
+
+ return ds
+
+ def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
+ """Constructs a `Dataset`.
+
+        This is the internal implementation to override; it is called when the user calls
+        `as_dataset`. It should read the pre-processed dataset files and generate
+        the `Dataset` object.
+
+ Args:
+ split (`datasets.Split`):
+ which subset of the data to read.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset`
+ """
+ cache_dir = self._fs._strip_protocol(self._output_dir)
+ dataset_name = self.dataset_name
+ if self._check_legacy_cache():
+ dataset_name = self.name
+ dataset_kwargs = ArrowReader(cache_dir, self.info).read(
+ name=dataset_name,
+ instructions=split,
+ split_infos=self.info.splits.values(),
+ in_memory=in_memory,
+ )
+ fingerprint = self._get_dataset_fingerprint(split)
+ return Dataset(fingerprint=fingerprint, **dataset_kwargs)
+
+ def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
+ """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
+ hasher = Hasher()
+ hasher.update(Path(self._relative_data_dir()).as_posix())
+ hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
+ fingerprint = hasher.hexdigest()
+ return fingerprint
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(
+ f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
+ )
+
+ dl_manager = StreamingDownloadManager(
+ base_path=base_path or self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ dataset_name=self.dataset_name,
+ data_dir=self.config.data_dir,
+ )
+ self._check_manual_download(dl_manager)
+ splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
+ # By default, return all splits
+ if split is None:
+ splits_generator = splits_generators
+ elif split in splits_generators:
+ splits_generator = splits_generators[split]
+ else:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ self._as_streaming_dataset_single,
+ splits_generator,
+ map_tuple=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
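+
+    # A minimal usage sketch (illustrative comment, not executed): stream a split without
+    # writing Arrow files to disk, assuming a builder obtained via `load_dataset_builder`
+    # for a hypothetical "my_dataset" repository:
+    #
+    #     builder = datasets.load_dataset_builder("my_dataset")
+    #     streaming_train = builder.as_streaming_dataset(split="train")
+    #     first_example = next(iter(streaming_train))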
+
+ def _as_streaming_dataset_single(
+ self,
+ splits_generator,
+ ) -> IterableDataset:
+ ex_iterable = self._get_examples_iterable_for_split(splits_generator)
+ # add auth to be able to access and decode audio/image files from private repositories.
+ token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
+ return IterableDataset(
+ ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ )
+
+ def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
+ """Run dataset transforms or add indexes"""
+ return None
+
+ def _post_processing_resources(self, split: str) -> Dict[str, str]:
+ """Mapping resource_name -> resource_file_name"""
+ return {}
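+    # A hedged illustration (comment only): a subclass whose `_post_process` adds a FAISS
+    # index could declare the resource file it produces here, e.g. with a hypothetical name:
+    #
+    #     def _post_processing_resources(self, split):
+    #         return {"embeddings_index": f"{split}_index.faiss"}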
+
+ def _download_post_processing_resources(
+ self, split: str, resource_name: str, dl_manager: DownloadManager
+ ) -> Optional[str]:
+ """Download the resource using the download manager and return the downloaded path."""
+ return None
+
+ @abc.abstractmethod
+ def _split_generators(self, dl_manager: DownloadManager):
+ """Specify feature dictionary generators and dataset splits.
+
+ This function returns a list of `SplitGenerator`s defining how to generate
+ data and what splits to use.
+
+ Example:
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={'file': 'train_data.zip'},
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={'file': 'test_data.zip'},
+ ),
+ ]
+
+ The above code will first call `_generate_examples(file='train_data.zip')`
+ to write the train data, then `_generate_examples(file='test_data.zip')` to
+ write the test data.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ Note that for datasets without a `VALIDATION` split, you can use a
+ fraction of the `TRAIN` data for evaluation as you iterate on your model
+ so as not to overfit to the `TEST` data.
+
+ For downloads and extractions, use the given `download_manager`.
+ Note that the `DownloadManager` caches downloads, so it is fine to have each
+ generator attempt to download the source data.
+
+ A good practice is to download all data in this function, and then
+        distribute the relevant parts to each split with the `gen_kwargs` argument.
+
+ Args:
+ dl_manager (`DownloadManager`):
+ Download manager to download the data
+
+ Returns:
+ `list`.
+ """
+ raise NotImplementedError()
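+
+    # A minimal sketch of a concrete `_split_generators` (comment only; `_URLS` is a
+    # hypothetical dict mapping split names to downloadable archives):
+    #
+    #     def _split_generators(self, dl_manager):
+    #         archive_paths = dl_manager.download_and_extract(_URLS)
+    #         return [
+    #             datasets.SplitGenerator(
+    #                 name=datasets.Split.TRAIN,
+    #                 gen_kwargs={"filepath": archive_paths["train"]},
+    #             ),
+    #             datasets.SplitGenerator(
+    #                 name=datasets.Split.TEST,
+    #                 gen_kwargs={"filepath": archive_paths["test"]},
+    #             ),
+    #         ]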
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+ format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Default to "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                  same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps examples in the
+                same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
+ """
+ raise NotImplementedError()
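+
+    # A minimal sketch of a concrete `_generate_examples` for a line-delimited text file
+    # (comment only; the `filepath` kwarg is assumed to be passed via `gen_kwargs`):
+    #
+    #     def _generate_examples(self, filepath):
+    #         with open(filepath, encoding="utf-8") as f:
+    #             for line_number, line in enumerate(f):
+    #                 # the line number is a unique and deterministic key
+    #                 yield line_number, {"text": line.strip()}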
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ check_duplicate_keys: bool,
+ file_format="arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[int, str]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
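+        # "JJJJJ" is a job-id placeholder and "SSSSS"/"NNNNN" are shard placeholders: a multi-shard
+        # split is later renamed to e.g. "<dataset_name>-train-00000-of-00002.arrow", while a
+        # single-shard split drops the suffix entirely (e.g. "<dataset_name>-train.arrow").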
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ "split_info": split_info,
+ "check_duplicate_keys": check_duplicate_keys,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shards_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self,
+ gen_kwargs: dict,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ split_info: SplitInfo,
+ check_duplicate_keys: bool,
+ job_id: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
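+        # Yield protocol: progress updates are yielded as (job_id, False, num_new_examples) while
+        # writing, and a single final item (job_id, True, (total_num_examples, total_num_bytes,
+        # features, num_shards, shard_lengths)) is yielded once the job is done.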
+ generator = self._generate_examples(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for key, record in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ example = self.info.features.encode_example(record) if self.info.features is not None else record
+ writer.write(example, key)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+            # Ignore the writer's error about no examples being written if it was caused by an error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ super()._download_and_prepare(
+ dl_manager,
+ verification_mode,
+ check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
+ or verification_mode == VerificationMode.ALL_CHECKS,
+ **prepare_splits_kwargs,
+ )
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
+
+
+class ArrowBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
+
+ @abc.abstractmethod
+ def _generate_tables(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                  same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps examples in the
+                same order.
+ example: `pyarrow.Table`, a feature table
+ ready to be encoded and written to disk.
+ """
+ raise NotImplementedError()
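+
+    # A minimal sketch of a concrete `_generate_tables` over CSV files (comment only;
+    # the `files` kwarg is assumed to be passed via `gen_kwargs`):
+    #
+    #     import pyarrow.csv as pac
+    #
+    #     def _generate_tables(self, files):
+    #         for file_idx, file in enumerate(files):
+    #             # each yielded key must be unique and deterministic
+    #             yield file_idx, pac.read_csv(file)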
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[str, int]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ try:
+ split_info = self.info.splits[split_generator.name]
+ except Exception:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_id_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_id_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shard_ids_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
+ generator = self._generate_tables(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for _, table in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ writer.write_table(table)
+ except CastError as cast_error:
+ raise DatasetGenerationCastError.from_cast_error(
+ cast_error=cast_error,
+ builder_name=self.info.builder_name,
+ gen_kwargs=gen_kwargs,
+ token=self.token,
+ )
+ num_examples_progress_update += len(table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+            # Ignore the writer's error about no examples being written if it was caused by an error in _generate_tables before the first table was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ if isinstance(e, DatasetGenerationError):
+ raise
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
+
+
+class MissingBeamOptions(ValueError):
+ pass
+
+
+class BeamBasedBuilder(DatasetBuilder):
+ """Beam-based Builder."""
+
+ def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
+ self._beam_runner = beam_runner
+ self._beam_options = beam_options
+ self._beam_writers = {} # {split: beam_writer} mapping.
+ super().__init__(*args, **kwargs)
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
+ # it's in the call signature of `_split_generators()`.
+ # This allows for global preprocessing in beam.
+ split_generators_kwargs = {}
+ split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
+ if "pipeline" in split_generators_arg_names:
+ split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
+ return split_generators_kwargs
+
+ @abc.abstractmethod
+ def _build_pcollection(self, pipeline, **kwargs):
+ """Build the beam pipeline examples for each `SplitGenerator`.
+
+ This function extracts examples from the raw data with parallel transforms
+ in a Beam pipeline. It is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples from the PCollection will be
+ encoded and written to disk.
+
+
+ Warning: When running in a distributed setup, make sure that the data
+ which will be read (download_dir, manual_dir,...) and written (cache_dir)
+ can be accessed by the workers jobs. The data should be located in a
+ shared filesystem, like GCS.
+
+
+ Args:
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam pipeline.
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs.
+
+ Returns:
+ `beam.PCollection`: Apache Beam PCollection containing the
+ example to send to `self.info.features.encode_example(...)`.
+
+ Example:
+
+ ```
+ def _build_pcollection(pipeline, extracted_dir=None):
+ return (
+ pipeline
+ | beam.Create(gfile.io.listdir(extracted_dir))
+ | beam.Map(_process_file)
+ )
+ ```
+ """
+ raise NotImplementedError()
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ # Create the Beam pipeline and forward it to `_prepare_split`
+ import apache_beam as beam
+
+ import datasets.utils.beam_utils as beam_utils
+
+ beam_runner = self._beam_runner
+ beam_options = self._beam_options
+
+ if not beam_runner and not beam_options:
+ usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
+ raise MissingBeamOptions(
+ "Trying to generate a dataset using Apache Beam, yet no Beam Runner "
+ "or PipelineOptions() has been provided in `load_dataset` or in the "
+ "builder arguments. For big datasets it has to run on large-scale data "
+ "processing tools like Dataflow, Spark, etc. More information about "
+ "Apache Beam runners at "
+ "https://beam.apache.org/documentation/runners/capability-matrix/"
+ "\nIf you really want to run it locally because you feel like the "
+ "Dataset is small enough, you can use the local beam runner called "
+ "`DirectRunner` (you may run out of memory). \nExample of usage: "
+ f"\n\t`{usage_example}`"
+ )
+ if self._writer_batch_size is not None:
+ logger.warning(
+ "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
+ )
+
+        # Beam type checking assumes that a transform's multiple outputs are of the same type,
+        # which is not our case. Plus it doesn't handle all types correctly, so we
+        # are better off without it.
+ pipeline_options = {"pipeline_type_check": False}
+ if "num_proc" in prepare_splits_kwargs:
+ num_workers = prepare_splits_kwargs.pop("num_proc")
+ pipeline_options["direct_num_workers"] = num_workers
+ pipeline_options["num_workers"] = num_workers
+ pipeline_options["direct_running_mode"] = "multi_processing"
+ # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
+            raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing is not supported yet.")
+ beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
+ # Use a single pipeline for all splits
+ pipeline = beam_utils.BeamPipeline(
+ runner=beam_runner,
+ options=beam_options,
+ )
+ super()._download_and_prepare(
+ dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
+ ) # TODO handle verification_mode in beam datasets
+ # Run pipeline
+ pipeline_results = pipeline.run()
+ pipeline_results.wait_until_finish()
+ metrics = pipeline_results.metrics()
+ # Update `info.splits`.
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ src_fpath = posixpath.join(self._output_dir, src_fname)
+ dst_fpath = posixpath.join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(f"{self._output_dir}/{config.DATASET_INFO_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with xopen(f"{self._output_dir}/{config.LICENSE_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+                "max_shard_size is not supported for Beam datasets. "
+                "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples in filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which build a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+        _ = pipeline | split_name >> _build_pcollection()  # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ self._request_info_from_hf_gcs()
+ datasets = {
+ split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
+ for split in self.info.splits.values()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
+
+ def _generate_examples_from_hf_gcs(self, split: SplitInfo):
+ if split.shard_lengths:
+ num_shards = len(split.shard_lengths)
+ remote_prepared_urls = [
+ f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
+ for shard_id in range(num_shards)
+ ]
+ else:
+ remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
+ key = 0
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ for remote_prepared_url in remote_prepared_urls:
+ with xopen(remote_prepared_url, "rb", download_config=download_config) as f:
+ with pa.ipc.open_stream(f) as reader:
+ for record_batch in reader:
+ for record in record_batch.to_pylist():
+ yield key, record
+ key += 1
+
+ def _request_info_from_hf_gcs(self):
+ from .download.streaming_download_manager import xopen
+
+ remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
+ try:
+            download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(remote_dataset_info, download_config=download_config) as f:
+ import json
+
+ _info = json.load(f)
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ self.info.update(DatasetInfo.from_dict(_info))
+
+ @property
+ def _remote_cache_dir_from_hf_gcs(self):
+ relative_data_dir = self._relative_data_dir(with_hash=False)
+ return HF_GCP_BASE_URL + "/" + Path(relative_data_dir).as_posix()
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/combine.py b/env-llmeval/lib/python3.10/site-packages/datasets/combine.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2aad87f0cc9278626d0be5111f91b6de49ef935
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/combine.py
@@ -0,0 +1,215 @@
+from typing import List, Optional, TypeVar
+
+from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .info import DatasetInfo
+from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
+from .splits import NamedSplit
+from .utils import logging
+from .utils.py_utils import Literal
+
+
+logger = logging.get_logger(__name__)
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def interleave_datasets(
+ datasets: List[DatasetType],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> DatasetType:
+ """
+ Interleave several datasets (sources) into a single dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+
+ You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
+
+ - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    The resulting dataset ends when one of the source datasets runs out of examples, except when the
+    stopping strategy is `all_exhausted`, in which case the resulting dataset ends when all datasets have run out of examples at least once.
+
+ Note for iterable datasets:
+
+ In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
+    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
+
+ Args:
+ datasets (`List[Dataset]` or `List[IterableDataset]`):
+ List of datasets to interleave.
+ probabilities (`List[float]`, *optional*, defaults to `None`):
+ If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, *optional*, defaults to `None`):
+ The random seed used to choose a source for each example.
+ info ([`DatasetInfo`], *optional*):
+ Dataset information, like description, citation, etc.
+
+ split ([`NamedSplit`], *optional*):
+ Name of the dataset split.
+
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+            - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
+ parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
+ `IterableDataset`.
+
+ Example:
+
+ For regular datasets (map-style):
+
+ ```python
+ >>> from datasets import Dataset, interleave_datasets
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
+    ```
+
+    For datasets in streaming mode (iterable):
+
+    ```python
+ >>> from datasets import load_dataset, interleave_datasets
+ >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
+ >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
+ >>> dataset = interleave_datasets([d1, d2])
+ >>> iterator = iter(dataset)
+ >>> next(iterator)
+ {'text': 'Mtendere Village was inspired by the vision...}
+ >>> next(iterator)
+ {'text': "Média de débat d'idées, de culture...}
+ ```
+ """
+ from .arrow_dataset import Dataset
+ from .iterable_dataset import IterableDataset
+
+ if not datasets:
+ raise ValueError("Unable to interleave an empty list of datasets.")
+ for i, dataset in enumerate(datasets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+ f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+ f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
+ if dataset_type is Dataset:
+ return _interleave_map_style_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+ else:
+ return _interleave_iterable_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+
+
+def concatenate_datasets(
+ dsets: List[DatasetType],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> DatasetType:
+ """
+ Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
+
+ Args:
+ dsets (`List[datasets.Dataset]`):
+ List of Datasets to concatenate.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ axis (`{0, 1}`, defaults to `0`):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+ Example:
+
+ ```py
+ >>> ds3 = concatenate_datasets([ds1, ds2])
+ ```
+ """
+
+ if not dsets:
+ raise ValueError("Unable to concatenate an empty list of datasets.")
+ for i, dataset in enumerate(dsets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if dataset_type is Dataset:
+ return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
+ else:
+ return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
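+
+
+# A usage sketch for horizontal concatenation (axis=1), assuming two datasets with the same
+# number of rows and disjoint column names (the toy datasets below are illustrative):
+#
+#     from datasets import Dataset, concatenate_datasets
+#
+#     left = Dataset.from_dict({"a": [0, 1, 2]})
+#     right = Dataset.from_dict({"b": [10, 11, 12]})
+#     combined = concatenate_datasets([left, right], axis=1)  # columns "a" and "b", 3 rows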
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/config.py b/env-llmeval/lib/python3.10/site-packages/datasets/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..32127bea7dcfc5f4bebce12f298ee8c8e3370f70
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/config.py
@@ -0,0 +1,259 @@
+import importlib
+import importlib.metadata
+import logging
+import os
+import platform
+from pathlib import Path
+from typing import Optional
+
+from packaging import version
+
+
+logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
+
+# Datasets
+S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
+CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
+REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
+
+# Metrics
+S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
+CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
+REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
+
+# Hub
+HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
+HUB_DEFAULT_VERSION = "main"
+
+PY_VERSION = version.parse(platform.python_version())
+
+# General environment variables accepted values for booleans
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
+
+
+# Imports
+DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
+PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
+PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
+HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
+
+TORCH_VERSION = "N/A"
+TORCH_AVAILABLE = False
+
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
+ if TORCH_AVAILABLE:
+ try:
+ TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling PyTorch because USE_TF is set")
+
+TF_VERSION = "N/A"
+TF_AVAILABLE = False
+
+if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
+ if TF_AVAILABLE:
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
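+        # (The `else` clause of the `for` loop below runs only if no candidate package provided
+        # version metadata, i.e. the loop completed without hitting `break`.)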
+ for package in [
+ "tensorflow",
+ "tensorflow-cpu",
+ "tensorflow-gpu",
+ "tf-nightly",
+ "tf-nightly-cpu",
+ "tf-nightly-gpu",
+ "intel-tensorflow",
+ "tensorflow-rocm",
+ "tensorflow-macos",
+ ]:
+ try:
+ TF_VERSION = version.parse(importlib.metadata.version(package))
+ except importlib.metadata.PackageNotFoundError:
+ continue
+ else:
+ break
+ else:
+ TF_AVAILABLE = False
+ if TF_AVAILABLE:
+ if TF_VERSION.major < 2:
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
+ TF_AVAILABLE = False
+ else:
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
+else:
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+JAX_VERSION = "N/A"
+JAX_AVAILABLE = False
+
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
+ if JAX_AVAILABLE:
+ try:
+ JAX_VERSION = version.parse(importlib.metadata.version("jax"))
+ logger.info(f"JAX version {JAX_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling JAX because USE_JAX is set to False")
+
+
+USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
+BEAM_VERSION = "N/A"
+BEAM_AVAILABLE = False
+if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ try:
+ BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
+ BEAM_AVAILABLE = True
+ logger.info(f"Apache Beam version {BEAM_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling Apache Beam because USE_BEAM is set to False")
+
+
+# Optional tools for data loading
+SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
+
+# Optional tools for feature decoding
+PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
+IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.0.31")
+IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.1.0")
+
+# Optional compression tools
+RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
+ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
+LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
+PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
+
+# Cache location
+DEFAULT_XDG_CACHE_HOME = "~/.cache"
+XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
+DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
+HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
+
+DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
+HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
+
+DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
+HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
+
+DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
+HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
+
+DOWNLOADED_DATASETS_DIR = "downloads"
+DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
+DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
+
+EXTRACTED_DATASETS_DIR = "extracted"
+DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
+EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
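+# Illustrative defaults when none of the cache-related environment variables above are set
+# ("~" left unexpanded for readability):
+#   HF_CACHE_HOME            -> ~/.cache/huggingface
+#   HF_DATASETS_CACHE        -> ~/.cache/huggingface/datasets
+#   DOWNLOADED_DATASETS_PATH -> ~/.cache/huggingface/datasets/downloads
+#   EXTRACTED_DATASETS_PATH  -> ~/.cache/huggingface/datasets/downloads/extracted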
+
+# Download count for the website
+HF_UPDATE_DOWNLOAD_COUNTS = (
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
+)
+
+# Remote dataset scripts support
+__HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
+HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
+ True
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
+ else False
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
+ else None
+)
+TIME_OUT_REMOTE_CODE = 15
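+# Illustration of the tri-state parsing above (derived from the boolean value sets defined earlier):
+#   HF_DATASETS_TRUST_REMOTE_CODE="1"   -> True
+#   HF_DATASETS_TRUST_REMOTE_CODE="no"  -> False
+#   HF_DATASETS_TRUST_REMOTE_CODE=""    -> None  (neither a true nor a false value)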
+
+# Datasets-server
+USE_PARQUET_EXPORT = True
+
+# Batch size constants. For more info, see:
+# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations
+DEFAULT_MAX_BATCH_SIZE = 1000
+
+# Size of the preloaded record batch in `Dataset.__iter__`
+ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
+
+# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
+MAX_SHARD_SIZE = "500MB"
+
+# Parquet configuration
+PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
+
+# Offline mode
+HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+
+# Here, `True` will disable progress bars globally without the possibility of enabling them
+# programmatically. `False` will enable them without the possibility of disabling them.
+# If the environment variable is not set (None), then the user is free to enable/disable
+# them programmatically.
+# TL;DR: the env variable has priority over code
+__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+ else None
+)
+
+# In-memory
+DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
+IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
+
+# File names
+DATASET_ARROW_FILENAME = "dataset.arrow"
+DATASET_INDICES_FILENAME = "indices.arrow"
+DATASET_STATE_JSON_FILENAME = "state.json"
+DATASET_INFO_FILENAME = "dataset_info.json"
+DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
+LICENSE_FILENAME = "LICENSE"
+METRIC_INFO_FILENAME = "metric_info.json"
+DATASETDICT_JSON_FILENAME = "dataset_dict.json"
+METADATA_CONFIGS_FIELD = "configs"
+REPOCARD_FILENAME = "README.md"
+REPOYAML_FILENAME = ".huggingface.yaml"
+
+MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
+
+MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
+
+# Temporary cache directory prefix
+TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
+
+# Streaming
+STREAMING_READ_MAX_RETRIES = 20
+STREAMING_READ_RETRY_INTERVAL = 5
+
+# Datasets without script
+DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
+ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+
+# Progress bars
+PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
+
+# Maximum number of uploaded files per commit
+UPLOADS_MAX_NUMBER_PER_COMMIT = 50
+
+# Backward compatibility
+MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/data_files.py b/env-llmeval/lib/python3.10/site-packages/datasets/data_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..752145413db8e770e921b99fd2b6bab00fcf1b4b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/data_files.py
@@ -0,0 +1,806 @@
+import os
+import re
+from functools import partial
+from glob import has_magic
+from pathlib import Path, PurePath
+from typing import Callable, Dict, List, Optional, Set, Tuple, Union
+
+import huggingface_hub
+from fsspec import get_fs_token_paths
+from fsspec.implementations.http import HTTPFileSystem
+from huggingface_hub import HfFileSystem
+from packaging import version
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .download import DownloadConfig
+from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin
+from .naming import _split_re
+from .splits import Split
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import is_local_path, is_relative_path
+from .utils.py_utils import glob_pattern_to_regex, string_to_dict
+
+
+SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
+
+
+logger = logging.get_logger(__name__)
+
+
+class Url(str):
+ pass
+
+
+class EmptyDatasetError(FileNotFoundError):
+ pass
+
+
+SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
+
+SPLIT_KEYWORDS = {
+ Split.TRAIN: ["train", "training"],
+ Split.VALIDATION: ["validation", "valid", "dev", "val"],
+ Split.TEST: ["test", "testing", "eval", "evaluation"],
+}
+NON_WORDS_CHARS = "-._ 0-9"
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
+elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"]
+else:
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = [
+ "**/{keyword}[{sep}]*",
+ "**/{keyword}/**",
+ "**/*[{sep}]{keyword}[{sep}]*",
+ "**/*[{sep}]{keyword}[{sep}]*/**",
+ "**/{keyword}[{sep}]*/**",
+ "**/*[{sep}]{keyword}/**",
+ ]
+
+DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
+DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = {
+ split: [
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
+ for keyword in SPLIT_KEYWORDS[split]
+ for pattern in KEYWORDS_IN_PATH_NAME_BASE_PATTERNS
+ ]
+ for split in DEFAULT_SPLITS
+}
+
+DEFAULT_PATTERNS_ALL = {
+ Split.TRAIN: ["**"],
+}
+
+ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
+ALL_DEFAULT_PATTERNS = [
+ DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME,
+ DEFAULT_PATTERNS_ALL,
+]
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ METADATA_PATTERNS = [
+ "metadata.csv",
+ "**/metadata.csv",
+ "metadata.jsonl",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+else:
+ METADATA_PATTERNS = [
+ "**/metadata.csv",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+WILDCARD_CHARACTERS = "*[]"
+FILES_TO_IGNORE = [
+ "README.md",
+ "config.json",
+ "dataset_info.json",
+ "dataset_infos.json",
+ "dummy_data.zip",
+ "dataset_dict.json",
+]
+
+
+def contains_wildcards(pattern: str) -> bool:
+    return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
+
+
+def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
+ """
+ Take the data_files patterns from the user, and format them into a dictionary.
+ Each key is the name of the split, and each value is a list of data files patterns (paths or urls).
+ The default split is "train".
+
+ Returns:
+ patterns: dictionary of split_name -> list of patterns
+ """
+ if isinstance(patterns, dict):
+ return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
+ elif isinstance(patterns, str):
+ return {SANITIZED_DEFAULT_SPLIT: [patterns]}
+ elif isinstance(patterns, list):
+ if any(isinstance(pattern, dict) for pattern in patterns):
+ for pattern in patterns:
+ if not (
+ isinstance(pattern, dict)
+ and len(pattern) == 2
+ and "split" in pattern
+ and isinstance(pattern.get("path"), (str, list))
+ ):
+ raise ValueError(
+ f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
+ )
+ splits = [pattern["split"] for pattern in patterns]
+ if len(set(splits)) != len(splits):
+ raise ValueError(f"Some splits are duplicated in data_files: {splits}")
+ return {
+ str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
+ for pattern in patterns
+ }
+ else:
+ return {SANITIZED_DEFAULT_SPLIT: patterns}
+ else:
+ return sanitize_patterns(list(patterns))
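+# Illustrative inputs and outputs for sanitize_patterns, following the branches above:
+#   sanitize_patterns("data/*.csv")
+#     -> {"train": ["data/*.csv"]}
+#   sanitize_patterns(["train.csv", "extra_train.csv"])
+#     -> {"train": ["train.csv", "extra_train.csv"]}
+#   sanitize_patterns({"train": "train.csv", "test": ["test_*.csv"]})
+#     -> {"train": ["train.csv"], "test": ["test_*.csv"]}
+#   sanitize_patterns([{"split": "train", "path": "train.csv"}, {"split": "test", "path": "test.csv"}])
+#     -> {"train": ["train.csv"], "test": ["test.csv"]}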
+
+
+def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's inside a special directory
+ we ignore by default (if it starts with a double underscore).
+
+ Users can still explicitly request a filepath inside such a directory if "__pycache__" is
+ mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── __pycache__
+ └── b.txt
+
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
+ False
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
+ False
+ """
+    # We just need to check that every special directory from the path is present explicitly in the pattern.
+    # Since we assume that the path matches the pattern, this is equivalent to checking that both
+    # the parent path and the parent pattern have the same number of special directories.
+ data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
+ data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
+ return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
+
+
+def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
+ a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
+
+ Users can still explicitly request a filepath that is hidden or is inside a hidden directory
+ if the hidden part is mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── a.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
+ False
+ """
+    # We just need to check that every hidden part of the path is present explicitly in the pattern.
+    # Since we assume that the path matches the pattern, this is equivalent to checking that both
+    # the path and the pattern have the same number of hidden parts.
+ hidden_directories_in_path = [
+ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ hidden_directories_in_pattern = [
+ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
+
+
+def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
+ """
+    Get the default patterns from a directory or repository by testing all the supported patterns.
+    The first patterns that return a non-empty list of data files are returned.
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ # first check the split patterns like data/{split}-00000-of-00001.parquet
+ for split_pattern in ALL_SPLIT_PATTERNS:
+ pattern = split_pattern.replace("{split}", "*")
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ splits: Set[str] = {
+ string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
+ for p in data_files
+ }
+ if any(not re.match(_split_re, split) for split in splits):
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{splits}'.")
+ sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
+ splits - set(DEFAULT_SPLITS)
+ )
+ return {split: [split_pattern.format(split=split)] for split in sorted_splits}
+ # then check the default patterns based on train/valid/test splits
+ for patterns_dict in ALL_DEFAULT_PATTERNS:
+ non_empty_splits = []
+ for split, patterns in patterns_dict.items():
+ for pattern in patterns:
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ non_empty_splits.append(split)
+ break
+ if non_empty_splits:
+ return {split: patterns_dict[split] for split in non_empty_splits}
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
+ """
+ Get the supported metadata patterns from a directory or repository.
+ """
+ non_empty_patterns = []
+ for pattern in METADATA_PATTERNS:
+ try:
+ metadata_files = pattern_resolver(pattern)
+ if len(metadata_files) > 0:
+ non_empty_patterns.append(pattern)
+ except FileNotFoundError:
+ pass
+ if non_empty_patterns:
+ return non_empty_patterns
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def resolve_pattern(
+ pattern: str,
+ base_path: str,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Resolve the paths and URLs of the data files from the pattern passed by the user.
+
+ You can use patterns to resolve multiple local files. Here are a few examples:
+ - *.csv to match all the CSV files at the first level
+ - **.csv to match all the CSV files at any level
+ - data/* to match all the files inside "data"
+ - data/** to match all the files inside "data" and its subdirectories
+
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
+ other than a forward slash /.
+
+ More generally:
+ - '*' matches any character except a forward-slash (to match just the file or directory name)
+ - '**' matches any character including a forward-slash /
+
+ Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
+ The same applies to special directories that start with a double underscore like "__pycache__".
+    You can still include one if the pattern explicitly mentions it:
+ - to include a hidden file: "*/.hidden.txt" or "*/.*"
+ - to include a hidden directory: ".hidden/*" or ".*/*"
+ - to include a special directory: "__special__/*" or "__*/*"
+
+ Example::
+
+ >>> from datasets.data_files import resolve_pattern
+ >>> base_path = "."
+ >>> resolve_pattern("docs/**/*.py", base_path)
+        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
+
+ Args:
+ pattern (str): Unix pattern or paths or URLs of the data files to resolve.
+ The paths can be absolute or relative to base_path.
+ Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
+ base_path (str): Base path to use when resolving relative paths.
+ allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
+ For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
+ Returns:
+ List[str]: List of paths or URLs to the local or remote files that match the patterns.
+ """
+ if is_relative_path(pattern):
+ pattern = xjoin(base_path, pattern)
+ elif is_local_path(pattern):
+ base_path = os.path.splitdrive(pattern)[0] + os.sep
+ else:
+ base_path = ""
+ pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
+ fs, _, _ = get_fs_token_paths(pattern, storage_options=storage_options)
+ fs_base_path = base_path.split("::")[0].split("://")[-1] or fs.root_marker
+ fs_pattern = pattern.split("::")[0].split("://")[-1]
+ files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
+ protocol_prefix = protocol + "://" if protocol != "file" else ""
+ glob_kwargs = {}
+ if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
+ # 10 times faster glob with detail=True (ignores costly info like lastCommit)
+ glob_kwargs["expand_info"] = False
+ matched_paths = [
+ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
+ for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
+ if info["type"] == "file"
+ and (xbasename(filepath) not in files_to_ignore)
+ and not _is_inside_unrequested_special_dir(
+ os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+ )
+ and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(
+ os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+ )
+ ] # ignore .ipynb and __pycache__, but keep /../
+ if allowed_extensions is not None:
+ out = [
+ filepath
+ for filepath in matched_paths
+ if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
+ ]
+ if len(out) < len(matched_paths):
+ invalid_matched_files = list(set(matched_paths) - set(out))
+ logger.info(
+ f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
+ )
+ else:
+ out = matched_paths
+ if not out:
+ error_msg = f"Unable to find '{pattern}'"
+ if allowed_extensions is not None:
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
+ raise FileNotFoundError(error_msg)
+ return out
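+# Illustrative call (local directory, keeping only CSV and Parquet files):
+#   resolve_pattern("data/**", base_path=".", allowed_extensions=[".csv", ".parquet"])
+# returns the matching paths/URLs, or raises FileNotFoundError if nothing matches.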
+
+
+def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
+ """
+    Get the default patterns from a directory by testing all the supported patterns.
+    The first patterns that return a non-empty list of data files are returned.
+
+ Some examples of supported patterns:
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── dataset.csv
+
+ Output:
+
+ {"train": ["**"]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train_0.csv
+ ├── train_1.csv
+ ├── train_2.csv
+ ├── train_3.csv
+ ├── test_0.csv
+ └── test_1.csv
+
+ Output:
+
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train/
+ │ ├── shard_0.csv
+ │ ├── shard_1.csv
+ │ ├── shard_2.csv
+ │ └── shard_3.csv
+ └── test/
+ ├── shard_0.csv
+ └── shard_1.csv
+
+ Output:
+
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train-00000-of-00003.csv
+ ├── train-00001-of-00003.csv
+ ├── train-00002-of-00003.csv
+ ├── test-00000-of-00001.csv
+ ├── random-00000-of-00003.csv
+ ├── random-00001-of-00003.csv
+ └── random-00002-of-00003.csv
+
+ Output:
+
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_data_files_patterns(resolver)
+ except FileNotFoundError:
+ raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
+
+
+def get_metadata_patterns(
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Get the supported metadata patterns from a local directory.
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_metadata_files_patterns(resolver)
+ except FileNotFoundError:
+ raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
+
+
+def _get_single_origin_metadata(
+ data_file: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[str]:
+ data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
+ fs, _, _ = get_fs_token_paths(data_file, storage_options=storage_options)
+ if isinstance(fs, HfFileSystem):
+ resolved_path = fs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
+ hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
+ resolved_path = hffs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ info = fs.info(data_file)
+ # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
+ for key in ["ETag", "etag", "mtime"]:
+ if key in info:
+ return (str(info[key]),)
+ return ()
+
+
+def _get_origin_metadata(
+ data_files: List[str],
+ max_workers=64,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[str]:
+ return thread_map(
+ partial(_get_single_origin_metadata, download_config=download_config),
+ data_files,
+ max_workers=max_workers,
+ tqdm_class=hf_tqdm,
+ desc="Resolving data files",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(data_files) <= 16 or None,
+ )
+
+
+class DataFilesList(List[str]):
+ """
+ List of data files (absolute local paths or URLs).
+    It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover DataFilesList has an additional attribute ``origin_metadata``.
+ It can store:
+ - the last modified time of local files
+ - ETag of remote files
+ - commit sha of a dataset repository
+
+ Thanks to this additional attribute, it is possible to hash the list
+ and get a different hash if and only if at least one file changed.
+ This is useful for caching Dataset objects that are obtained from a list of data files.
+ """
+
+ def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
+ super().__init__(data_files)
+ self.origin_metadata = origin_metadata
+
+ def __add__(self, other):
+ return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: List[str],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern in patterns:
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return cls(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
+ pattern = "|".join("\\" + ext for ext in extensions)
+ pattern = re.compile(f".*({pattern})(\\..+)?$")
+ return DataFilesList(
+ [data_file for data_file in self if pattern.match(data_file)],
+ origin_metadata=self.origin_metadata,
+ )
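+# Usage sketch (local files; patterns resolve relative to the current working directory by default):
+#   data_files = DataFilesList.from_patterns(["data/*.csv"])
+#   data_files.origin_metadata  # e.g. one (mtime,) tuple per local file, used for cache fingerprinting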
+
+
+class DataFilesDict(Dict[str, DataFilesList]):
+ """
+ Dict of split_name -> list of data files (absolute local paths or URLs).
+    It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover each list is a DataFilesList. It is possible to hash the dictionary
+ and get a different hash if and only if at least one file changed.
+ For more info, see ``DataFilesList``.
+
+ This is useful for caching Dataset objects that are obtained from a list of data files.
+
+ Changing the order of the keys of this dictionary also doesn't change its hash.
+ """
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_local_or_remote(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_hf_repo(
+ patterns_for_key,
+ dataset_info=dataset_info,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_patterns(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
+ out = type(self)()
+ for key, data_files_list in self.items():
+ out[key] = data_files_list.filter_extensions(extensions)
+ return out
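+# Usage sketch combining sanitize_patterns with DataFilesDict (local files):
+#   patterns = sanitize_patterns({"train": "data/train-*.csv", "test": "data/test-*.csv"})
+#   data_files = DataFilesDict.from_patterns(patterns, base_path=".")
+#   data_files["train"]  # a DataFilesList of the resolved train files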
+
+
+class DataFilesPatternsList(List[str]):
+ """
+ List of data files patterns (absolute local paths or URLs).
+ For each pattern there should also be a list of allowed extensions
+    to keep, or None to keep all the files for the pattern.
+ """
+
+ def __init__(
+ self,
+ patterns: List[str],
+ allowed_extensions: List[Optional[List[str]]],
+ ):
+ super().__init__(patterns)
+ self.allowed_extensions = allowed_extensions
+
+ def __add__(self, other):
+        return DataFilesPatternsList([*self, *other], self.allowed_extensions + other.allowed_extensions)
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ return cls(patterns, [allowed_extensions] * len(patterns))
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern, allowed_extensions in zip(self, self.allowed_extensions):
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return DataFilesList(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
+ return DataFilesPatternsList(
+ self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]
+ )
+
+
+class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
+ """
+ Dict of split_name -> list of data files patterns (absolute local paths or URLs).
+ """
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesPatternsList.from_patterns(
+ patterns_for_key,
+ allowed_extensions=allowed_extensions,
+ )
+ if not isinstance(patterns_for_key, DataFilesPatternsList)
+ else patterns_for_key
+ )
+ return out
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = DataFilesDict()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.resolve(base_path, download_config)
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
+ out = type(self)()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.filter_extensions(extensions)
+ return out
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/dataset_dict.py b/env-llmeval/lib/python3.10/site-packages/datasets/dataset_dict.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab26dbbb83d599cf62e6485d4d647871ea7f3a0d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/dataset_dict.py
@@ -0,0 +1,2288 @@
+import contextlib
+import copy
+import fnmatch
+import json
+import math
+import posixpath
+import re
+import warnings
+from io import BytesIO
+from pathlib import Path
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+import fsspec
+import numpy as np
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+)
+
+from . import config
+from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
+from .features import Features
+from .features.features import FeatureType
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import Table
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils.deprecation_utils import deprecated
+from .utils.doc_utils import is_documented_by
+from .utils.hub import list_files_info
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
+from .utils.typing import PathLike
+
+
+logger = logging.get_logger(__name__)
+
+
+class DatasetDict(dict):
+ """A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
+
+ def _check_values_type(self):
+ for dataset in self.values():
+ if not isinstance(dataset, Dataset):
+ raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
+
+ def _check_values_features(self):
+ items = list(self.items())
+ for item_a, item_b in zip(items[:-1], items[1:]):
+ if item_a[1].features != item_b[1].features:
+ raise ValueError(
+ f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        # Here `del` is used to delete the pyarrow tables. This properly closes the files used for memory-mapped tables
+ for dataset in self.values():
+ if hasattr(dataset, "_data"):
+ del dataset._data
+ if hasattr(dataset, "_indices"):
+ del dataset._indices
+
+ def __getitem__(self, k) -> Dataset:
+ if isinstance(k, (str, NamedSplit)) or len(self) == 0:
+ return super().__getitem__(k)
+ else:
+ available_suggested_splits = [
+ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
+ ]
+ suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
+ raise KeyError(
+ f"Invalid key: {k}. Please first select a split. For example: "
+ f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
+ f"Available splits: {sorted(self)}"
+ )
+
+ @property
+ def data(self) -> Dict[str, Table]:
+ """The Apache Arrow tables backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.data
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.data for k, dataset in self.items()}
+
+ @property
+ def cache_files(self) -> Dict[str, Dict]:
+ """The cache files containing the Apache Arrow table backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cache_files
+ {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
+ 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
+ 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cache_files for k, dataset in self.items()}
+
+ @property
+ def num_columns(self) -> Dict[str, int]:
+ """Number of columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_columns
+ {'test': 2, 'train': 2, 'validation': 2}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_columns for k, dataset in self.items()}
+
+ @property
+ def num_rows(self) -> Dict[str, int]:
+ """Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_rows
+ {'test': 1066, 'train': 8530, 'validation': 1066}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_rows for k, dataset in self.items()}
+
+ @property
+ def column_names(self) -> Dict[str, List[str]]:
+ """Names of the columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.column_names
+ {'test': ['text', 'label'],
+ 'train': ['text', 'label'],
+ 'validation': ['text', 'label']}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.column_names for k, dataset in self.items()}
+
+ @property
+ def shape(self) -> Dict[str, Tuple[int]]:
+ """Shape of each split of the dataset (number of columns, number of rows).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.shape
+ {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.shape for k, dataset in self.items()}
+
+ def flatten(self, max_depth=16) -> "DatasetDict":
+ """Flatten the Apache Arrow Table of each split (nested features are flatten).
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad")
+ >>> ds["train"].features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ DatasetDict({
+ train: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ validation: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 10570
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
+
+ def unique(self, column: str) -> Dict[str, List]:
+ """Return a list of the unique elements in a column for each split.
+
+ This is implemented in the low-level backend and as such, very fast.
+
+ Args:
+ column (`str`):
+ column name (list all the column names with [`~datasets.Dataset.column_names`])
+
+ Returns:
+ Dict[`str`, `list`]: Dictionary of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.unique("label")
+ {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.unique(column) for k, dataset in self.items()}
+
+ def cleanup_cache_files(self) -> Dict[str, int]:
+ """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Return:
+ `Dict` with the number of removed files for each split
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cleanup_cache_files()
+ {'test': 0, 'train': 0, 'validation': 0}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
+
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"DatasetDict({{\n{repr}\n}})"
+
+ def cast(self, features: Features) -> "DatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+        You can also change the features using [`Dataset.map`] with the `features` argument, but `cast`
+        doesn't copy the data to a new dataset and is thus faster.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name and order of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature) -> "DatasetDict":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """
+ Remove one or several column(s) from each split in the dataset
+ and the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset dictionary.
+
+ You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method
+ is in-place (doesn't copy the data to a new dataset) and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.remove_columns("label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
+ """
+ Rename a column in the dataset and move the features associated to the original column under the new column name.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can also rename a column using [`~Dataset.map`] with `remove_columns` but the present method:
+ - takes care of moving the original features under the new column name.
+ - doesn't copy the data to a new dataset and is thus much faster.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.rename_column("label", "label_new")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`DatasetDict`]: A copy of the dataset with renamed columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ DatasetDict({
+ train: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """Select one or several column(s) from each split in the dataset and
+ the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset
+ dictionary.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.select_columns("text")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
+
+ Args:
+ column (`str`):
+ The name of the column to cast.
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq")
+ >>> ds["train"].features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column("answer")
+ >>> ds["train"].features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
+ )
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+ """
+ self._check_values_type()
+ old_format_type = {k: dataset._format_type for k, dataset in self.items()}
+ old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
+ old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
+ old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ for k, dataset in self.items():
+ dataset.set_format(
+ old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
+ )
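+    # Usage sketch for `formatted_as` (the previous format is restored when the `with` block exits),
+    # assuming `ds` is a DatasetDict as in the examples above:
+    #   with ds.formatted_as(type="numpy", columns=["label"]):
+    #       labels = ds["train"]["label"]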
+
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns).
+ The format is set for every dataset in the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects),
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+        It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+        gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as:
+
+        `new formatted columns = (all columns - previously unformatted columns)`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
+        The transform is set for every dataset in the dataset dictionary.
+        As with :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`.
+
+ Args:
+ transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in ``__getitem__``.
+ columns (`List[str]`, optional): columns to format in the output
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
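+ Example:
+
+ A minimal sketch (assumes the `transformers` tokenizer API; the keys mentioned in the comment are indicative):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(batch):
+ ... return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
+ >>> ds.set_transform(encode)
+ >>> ds["train"][0] # tokenized lazily on access; typically returns 'input_ids', 'token_type_ids' and 'attention_mask'
+ ```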
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ The format is set for every dataset in the dataset dictionary.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ The transform is set for every dataset in the dataset dictionary.
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
+ >>> ds = ds.with_transform(encode)
+ >>> ds["train"][0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
+ 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
+ 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
+ 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
+ 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
+ 188, 1566, 7912, 14516, 6997, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if the function does update examples).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`[datasets.Features]`, *optional*, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds["train"][0:3]["text"]
+ ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
+ 'Review: effective but too-tepid biopic']
+
+ # process a batch of examples (requires a tokenizer)
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 4265
+ })
+ validation: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ })
+ ```
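+
+ A batched variant (a sketch; with `batched=True` the function receives columns as lists and must return a list of booleans):
+
+ ```py
+ >>> ds.filter(lambda batch: [label == 1 for label in batch["label"]], batched=True)
+ ```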
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_names (`Dict[str, str]`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
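+
+ Example:
+
+ A minimal sketch (the `filter` call is only there to create an indices mapping, which `flatten_indices` then removes by rewriting the underlying Arrow tables):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds = ds.filter(lambda x: x["label"] == 1) # creates an indices mapping
+ >>> ds = ds.flatten_indices() # materializes the selection and drops the mapping
+ ```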
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.flatten_indices(
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ num_proc=num_proc,
+ new_fingerprint=new_fingerprint,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def sort(
+ self,
+ column_names: Union[str, Sequence[str]],
+ reverse: Union[bool, Sequence[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`.
+ The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first` or at the end if `at_end` or `last`
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes')
+ >>> ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['train']['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ self._check_values_type()
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.sort(
+ column_names=column_names,
+ reverse=reverse,
+ kind=kind,
+ null_placement=null_placement,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self,
+ seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
+ seed: Optional[int] = None,
+ generators: Optional[Dict[str, np.random.Generator]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new Dataset where the rows are shuffled.
+
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy BitGenerator to use, or a seed to initialize NumPy's default random generator (PCG64).
+
+ Args:
+ seeds (`Dict[str, int]` or `int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ You can provide one `seed` per dataset in the dataset dictionary.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
+ generators (`Dict[str, np.random.Generator]`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ You have to provide one `generator` per dataset in the dataset dictionary.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`Dict[str, str]`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mappings instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"]["label"][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds["train"]["label"][:10]
+ [0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
+ ```
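+
+ Each split can also be shuffled with its own seed (a sketch; the `seeds` mapping must cover every split in the dictionary):
+
+ ```py
+ >>> shuffled_ds = ds.shuffle(seeds={"train": 42, "validation": 42, "test": 42})
+ ```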
+ """
+ self._check_values_type()
+ if seed is not None and seeds is not None:
+ raise ValueError("Please specify seed or seeds, but not both")
+ seeds = seed if seed is not None else seeds
+ if seeds is None:
+ seeds = {k: None for k in self}
+ elif not isinstance(seeds, dict):
+ seeds = {k: seeds for k in self}
+ if generators is None:
+ generators = {k: None for k in self}
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.shuffle(
+ seed=seeds[k],
+ generator=generators[k],
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def save_to_disk(
+ self,
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the `Image()` and `Audio()` data are stored in the arrow files.
+ If you want to store paths or urls, please use the `Value("string")` type.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI
+ (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
+ saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be saved locally. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+ You need to provide the number of shards for each dataset in the dataset dictionary.
+ Use a dictionary to define a different num_shards for each split.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when saving the dataset shards locally.
+ Multiprocessing is disabled by default.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Example:
+
+ ```python
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
+ ```
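+
+ For a remote filesystem, a hedged sketch (assumes `s3fs` is installed and the bucket is accessible; the bucket name and options are illustrative):
+
+ ```python
+ >>> dataset_dict.save_to_disk("s3://my-bucket/dataset/directory", storage_options={"anon": False})
+ ```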
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ fs.makedirs(dataset_dict_path, exist_ok=True)
+
+ with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
+ json.dump({"splits": list(self)}, f)
+ for k, dataset in self.items():
+ dataset.save_to_disk(
+ posixpath.join(dataset_dict_path, k),
+ num_shards=num_shards.get(k),
+ max_shard_size=max_shard_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ @staticmethod
+ def load_from_disk(
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "DatasetDict":
+ """
+ Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset dict directory where the dataset dict will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, [dataset_dict_path] = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
+
+ dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
+ if not fs.isfile(dataset_dict_json_path):
+ if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
+ )
+
+ with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
+ splits = json.load(f)["splits"]
+
+ dataset_dict = DatasetDict()
+ for k in splits:
+ dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
+ dataset_dict[k] = Dataset.load_from_disk(
+ dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
+ )
+ return dataset_dict
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from CSV file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the CSV file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
+ ```
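+
+ Multiple splits can be read at once (a sketch; the file names are illustrative):
+
+ ```py
+ >>> ds = DatasetDict.from_csv({'train': 'path/to/train.csv', 'test': 'path/to/test.csv'})
+ ```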
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from JSON Lines file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the JSON Lines file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from Parquet file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the Parquet file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset.parquet'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from text file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the text file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @deprecated()
+ @is_documented_by(Dataset.prepare_for_task)
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
+
+ @is_documented_by(Dataset.align_labels_with_mapping)
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
+ for k, dataset in self.items()
+ }
+ )
+
+ def push_to_hub(
+ self,
+ repo_id,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
+ The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
+
+ The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to False.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`):
+ Configuration name of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
+ (like `"500MB"` or `"1GB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+ Use a dictionary to define a different num_shards for each split.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: remove local path information and embed the file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+ >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+ >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+ ```
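+
+ You can also push to a specific branch or open a pull request instead of committing directly (a sketch; the branch name is illustrative):
+
+ ```python
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", revision="dev", create_pr=True)
+ ```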
+ """
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ self._check_values_type()
+ self._check_values_features()
+ total_uploaded_size = 0
+ total_dataset_nbytes = 0
+ info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict()
+
+ for split in self.keys():
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions = []
+ for split in self.keys():
+ logger.info(f"Pushing split {split} to the Hub.")
+ # The split=key needs to be removed before merging
+ split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
+ repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ create_pr=create_pr,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards.get(split),
+ embed_external_files=embed_external_files,
+ )
+ additions += split_additions
+ total_uploaded_size += uploaded_size
+ total_dataset_nbytes += dataset_nbytes
+ info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = total_uploaded_size
+ info_to_dump.dataset_size = total_dataset_nbytes
+ info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ repo_splits = [] # use a list to keep the order of the splits
+ deletions = []
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token):
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
+ and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ metadata_config_to_dump = {
+ "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
+ }
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+
+class IterableDatasetDict(dict):
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"IterableDatasetDict({{\n{repr}\n}})"
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Return a dataset with the specified format.
+ This method only supports the "torch" format for now.
+ The format is set to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*, defaults to `None`):
+ If set to "torch", the returned dataset
+ will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> def encode(examples):
+ ... return tokenizer(examples["text"], truncation=True, padding="max_length")
+ >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
+ >>> ds = ds.with_format("torch")
+ ```
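+
+ The torch-formatted splits can then be consumed with a `DataLoader` (a sketch; assumes `torch` is installed, and the batch size is illustrative):
+
+ ```py
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds["train"], batch_size=32)
+ ```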
+ """
+ return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: int = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be
+ dropped instead of being processed by the function.
+ remove_columns (`[List[str]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
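+
+ A batched sketch (assumes the `transformers` tokenizer API; with `batched=True` the function receives dicts of lists):
+
+ ```py
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> ds = ds.map(lambda batch: tokenizer(batch["text"]), batched=True, remove_columns=["text"])
+ ```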
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+ The filtering is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds["train"].take(3))
+ [{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDatasetDict":
+ """
+ Randomly shuffles the elements of this dataset.
+ The shuffling is applied to all the datasets of the dataset dictionary.
+
+ This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.shuffle(seed=42)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
+ """
+ Rename a column in the dataset, and move the features associated with the original column under the new column
+ name.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated with the original columns under
+ the new column names.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with renamed columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> next(iter(ds["train"]))
+ {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'rating': 1}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
+ )
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """
+ Remove one or several column(s) in the dataset and the features associated with them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+ The removal is applied to all the datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """Select one or several column(s) in the dataset and the features
+ associated with them. The selection is done on-the-fly on the examples
+ when iterating over the dataset. The selection is applied to all the
+ datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
+ """Cast column to feature for decoding.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`IterableDatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ features (`Features`):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with the new features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/distributed.py b/env-llmeval/lib/python3.10/site-packages/datasets/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..e036fabaf2cf6231ae6a3ca2c443100ccbb0b4d5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/distributed.py
@@ -0,0 +1,39 @@
+from typing import TypeVar
+
+from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
+from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ For map-style datasets:
+
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ For iterable datasets:
+
+ If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most efficient approach.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`Dataset`] or [`IterableDataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
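+
+ Example (illustrative sketch; assumes a streaming dataset and a 2-node setup):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from datasets.distributed import split_dataset_by_node
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)  # doctest: +SKIP
+ >>> ds_rank0 = split_dataset_by_node(ds, rank=0, world_size=2)  # doctest: +SKIP
+ ```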
+ """
+ if isinstance(dataset, Dataset):
+ return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+ else:
+ return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/exceptions.py b/env-llmeval/lib/python3.10/site-packages/datasets/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..619f2a10117dc16c20002b4cdcaf17a7f2350a8c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/exceptions.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+from typing import Any, Dict, List, Optional, Union
+
+from huggingface_hub import HfFileSystem
+
+from . import config
+from .table import CastError
+from .utils.track import TrackedIterable, tracked_list, tracked_str
+
+
+class DatasetsError(Exception):
+ """Base class for exceptions in this library."""
+
+
+class DefunctDatasetError(DatasetsError):
+ """The dataset has been defunct."""
+
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+ """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+ """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+ """Dataset not found.
+
+ Raised when trying to access:
+ - a missing dataset, or
+ - a private/gated dataset and the user is not authenticated.
+ """
+
+
+class DatasetBuildError(DatasetsError):
+ pass
+
+
+class ManualDownloadError(DatasetBuildError):
+ pass
+
+
+class FileFormatError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationCastError(DatasetGenerationError):
+ @classmethod
+ def from_cast_error(
+ cls,
+ cast_error: CastError,
+ builder_name: str,
+ gen_kwargs: Dict[str, Any],
+ token: Optional[Union[bool, str]],
+ ) -> "DatasetGenerationCastError":
+ explanation_message = (
+ f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
+ )
+ formatted_tracked_gen_kwargs: List[str] = []
+ for gen_kwarg in gen_kwargs.values():
+ if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
+ continue
+ while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
+ gen_kwarg = gen_kwarg.last_item
+ if isinstance(gen_kwarg, tracked_str):
+ gen_kwarg = gen_kwarg.get_origin()
+ if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
+ resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
+ gen_kwarg = "hf://" + resolved_path.unresolve()
+ if "@" + resolved_path.revision in gen_kwarg:
+ gen_kwarg = (
+ gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ + f" (at revision {resolved_path.revision})"
+ )
+ formatted_tracked_gen_kwargs.append(str(gen_kwarg))
+ if formatted_tracked_gen_kwargs:
+ explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
+ help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
+ return cls("An error occurred while generating the dataset" + explanation_message + help_message)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15aefa5f42a3a2a3c8ca8ba282996d421d5d7d60
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__init__.py
@@ -0,0 +1,86 @@
+import importlib
+import shutil
+import threading
+import warnings
+from typing import List
+
+import fsspec
+import fsspec.asyn
+from fsspec.implementations.local import LocalFileSystem
+
+from ..utils.deprecation_utils import deprecated
+from . import compression
+
+
+_has_s3fs = importlib.util.find_spec("s3fs") is not None
+
+if _has_s3fs:
+ from .s3filesystem import S3FileSystem # noqa: F401
+
+COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
+ compression.Bz2FileSystem,
+ compression.GzipFileSystem,
+ compression.Lz4FileSystem,
+ compression.XzFileSystem,
+ compression.ZstdFileSystem,
+]
+
+# Register custom filesystems
+for fs_class in COMPRESSION_FILESYSTEMS:
+ if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
+ warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
+ fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
+
+
+@deprecated(
+ "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
+)
+def extract_path_from_uri(dataset_path: str) -> str:
+ """
+ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
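+
+ Example (illustrative; the paths are hypothetical):
+
+ ```py
+ >>> extract_path_from_uri("s3://my-bucket/dataset/train")
+ 'my-bucket/dataset/train'
+ >>> extract_path_from_uri("dataset/train")
+ 'dataset/train'
+ ```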
+ """
+ if "://" in dataset_path:
+ dataset_path = dataset_path.split("://")[1]
+ return dataset_path
+
+
+def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
+ """
+ Checks if `fs` is a remote filesystem.
+
+ Args:
+ fs (`fsspec.spec.AbstractFileSystem`):
+ An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
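+
+ Example (illustrative; note that any non-local filesystem, even an in-memory one, counts as remote here):
+
+ ```py
+ >>> import fsspec
+ >>> is_remote_filesystem(fsspec.filesystem("file"))
+ False
+ >>> is_remote_filesystem(fsspec.filesystem("memory"))
+ True
+ ```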
+ """
+ return not isinstance(fs, LocalFileSystem)
+
+
+def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
+ """
+ Renames the file `src` in `fs` to `dst`.
+ """
+ if not is_remote_filesystem(fs):
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
+ shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
+ else:
+ fs.mv(src, dst, recursive=True)
+
+
+def _reset_fsspec_lock() -> None:
+ """
+ Clear reference to the loop and thread.
+ This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
+ Only required for fsspec >= 0.9.0
+ See https://github.com/fsspec/gcsfs/issues/379
+ """
+ if hasattr(fsspec.asyn, "reset_lock"):
+ # for future fsspec>2022.05.0
+ fsspec.asyn.reset_lock()
+ else:
+ fsspec.asyn.iothread[0] = None
+ fsspec.asyn.loop[0] = None
+ fsspec.asyn.lock = threading.Lock()
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e66a4b526eddcfa8dfd81aee1c477737bc1231d5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1253a835064bc90cedfc1e2834e3403be051a59
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2603beb7ea80d4d5dccf6c5e75a828dfb2998bca
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/compression.py b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..d64872040b0abe0cd0bcfdfe004c2279213edafd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/compression.py
@@ -0,0 +1,178 @@
+import os
+from typing import Optional
+
+import fsspec
+from fsspec.archive import AbstractArchiveFileSystem
+from fsspec.utils import DEFAULT_BLOCK_SIZE
+
+
+class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
+ """Read contents of compressed file as a filesystem with one file inside."""
+
+ root_marker = ""
+ protocol: str = (
+ None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
+ )
+ compression: str = None # compression type in fsspec. ex: "gzip"
+ extension: str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
+
+ def __init__(
+ self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
+ ):
+ """
+ The compressed file system can be instantiated from any compressed file.
+ It reads the contents of compressed file as a filesystem with one file inside, as if it was an archive.
+
+ The single file inside the filesystem is named after the compressed file,
+ without the compression extension at the end of the filename.
+
+ Args:
+ fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
+ mode (:obj:``str``): Currently, only 'rb' accepted
+ target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
+ target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
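+
+ Example (illustrative sketch using the `GzipFileSystem` subclass; the URL is hypothetical):
+
+ ```py
+ >>> fs = GzipFileSystem(fo="https://example.com/data.txt.gz")  # doctest: +SKIP
+ >>> decompressed_bytes = fs.cat("data.txt")  # doctest: +SKIP
+ ```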
+ """
+ super().__init__(self, **kwargs)
+ # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
+ self.file = fsspec.open(
+ fo,
+ mode="rb",
+ protocol=target_protocol,
+ compression=self.compression,
+ client_kwargs={
+ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
+ "trust_env": True, # Enable reading proxy env variables.
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
+ },
+ **(target_options or {}),
+ )
+ self.compressed_name = os.path.basename(self.file.path.split("::")[0])
+ self.uncompressed_name = (
+ self.compressed_name[: self.compressed_name.rindex(".")]
+ if "." in self.compressed_name
+ else self.compressed_name
+ )
+ self.dir_cache = None
+
+ @classmethod
+ def _strip_protocol(cls, path):
+ # compressed file paths are always relative to the archive root
+ return super()._strip_protocol(path).lstrip("/")
+
+ def _get_dirs(self):
+ if self.dir_cache is None:
+ f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
+ self.dir_cache = {f["name"]: f}
+
+ def cat(self, path: str):
+ return self.file.open().read()
+
+ def _open(
+ self,
+ path: str,
+ mode: str = "rb",
+ block_size=None,
+ autocommit=True,
+ cache_options=None,
+ **kwargs,
+ ):
+ path = self._strip_protocol(path)
+ if mode != "rb":
+ raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
+ return self.file.open()
+
+
+class Bz2FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of BZ2 file as a filesystem with one file inside."""
+
+ protocol = "bz2"
+ compression = "bz2"
+ extension = ".bz2"
+
+
+class GzipFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of GZIP file as a filesystem with one file inside."""
+
+ protocol = "gzip"
+ compression = "gzip"
+ extension = ".gz"
+
+
+class Lz4FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of LZ4 file as a filesystem with one file inside."""
+
+ protocol = "lz4"
+ compression = "lz4"
+ extension = ".lz4"
+
+
+class XzFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of .xz (LZMA) file as a filesystem with one file inside."""
+
+ protocol = "xz"
+ compression = "xz"
+ extension = ".xz"
+
+
+class ZstdFileSystem(BaseCompressedFileFileSystem):
+ """
+ Read contents of zstd file as a filesystem with one file inside.
+
+ Note that reading in binary mode with fsspec isn't supported yet:
+ https://github.com/indygreg/python-zstandard/issues/136
+ """
+
+ protocol = "zstd"
+ compression = "zstd"
+ extension = ".zst"
+
+ def __init__(
+ self,
+ fo: str,
+ mode: str = "rb",
+ target_protocol: Optional[str] = None,
+ target_options: Optional[dict] = None,
+ block_size: int = DEFAULT_BLOCK_SIZE,
+ **kwargs,
+ ):
+ super().__init__(
+ fo=fo,
+ mode=mode,
+ target_protocol=target_protocol,
+ target_options=target_options,
+ block_size=block_size,
+ **kwargs,
+ )
+ # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
+ #
+ # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
+ # out.close = close
+ # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
+ #
+ # see https://github.com/intake/filesystem_spec/issues/725
+ _enter = self.file.__enter__
+
+ class WrappedFile:
+ def __init__(self, file_):
+ self._file = file_
+
+ def __enter__(self):
+ self._file.__enter__()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self._file.__exit__(*args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._file)
+
+ def __next__(self):
+ return next(self._file)
+
+ def __getattr__(self, attr):
+ return getattr(self._file, attr)
+
+ def fixed_enter(*args, **kwargs):
+ return WrappedFile(_enter(*args, **kwargs))
+
+ self.file.__enter__ = fixed_enter
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d204f1f8738e51411cacac0201fd67e5c185422
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
@@ -0,0 +1,116 @@
+import s3fs
+
+from ..utils.deprecation_utils import deprecated
+
+
+@deprecated("Use s3fs.S3FileSystem instead.")
+class S3FileSystem(s3fs.S3FileSystem):
+ """
+ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
+
+ Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
+
+ Args:
+ anon (`bool`, default to `False`):
+ Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
+ or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
+ key (`str`):
+ If not anonymous, use this access key ID, if specified.
+ secret (`str`):
+ If not anonymous, use this secret access key, if specified.
+ token (`str`):
+ If not anonymous, use this security token, if specified.
+ use_ssl (`bool`, defaults to `True`):
+ Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
+ also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
+ s3_additional_kwargs (`dict`):
+ Parameters that are used when calling S3 API methods. Typically used for things
+ like ServerSideEncryption.
+ client_kwargs (`dict`):
+ Parameters for the botocore client.
+ requester_pays (`bool`, defaults to `False`):
+ Whether `RequesterPays` buckets are supported.
+ default_block_size (`int`):
+ If given, the default block size value used for `open()`, if no specific value is given at call time.
+ The built-in default is 5MB.
+ default_fill_cache (`bool`, defaults to `True`):
+ Whether to use cache filling with open by default. Refer to `S3File.open`.
+ default_cache_type (`str`, defaults to `bytes`):
+ If given, the default `cache_type` value used for `open()`. Set to `none` if no
+ caching is desired. See fsspec's documentation for other available `cache_type` values.
+ version_aware (`bool`, defaults to `False`):
+ Whether to support bucket versioning. If enabled, this will require the user to have
+ the necessary IAM permissions for dealing with versioned objects.
+ cache_regions (`bool`, defaults to `False`):
+ Whether to cache bucket regions. Whenever a new bucket is used, it will
+ first find out which region it belongs to and then use the client for that region.
+ asynchronous (`bool`, defaults to `False`):
+ Whether this instance is to be used from inside coroutines.
+ config_kwargs (`dict`):
+ Parameters passed to `botocore.client.Config`.
+ **kwargs:
+ Other parameters for core session.
+ session (`aiobotocore.session.AioSession`):
+ Session to be used for all connections. This session will be used inplace of creating
+ a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
+ skip_instance_cache (`bool`):
+ Control reuse of instances. Passed on to `fsspec`.
+ use_listings_cache (`bool`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ listings_expiry_time (`int` or `float`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
+
+ Examples:
+
+ Listing files from public S3 bucket.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
+ >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
+ ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+ Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
+ ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+ Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
+
+ ```py
+ >>> import botocore
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
+ >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
+ ```
+
+ Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ >>> print(len(dataset))
+ 25000
+ ```
+
+ Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from datasets.filesystems import S3FileSystem
+
+ >>> dataset = load_dataset("imdb")
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ ```
+ """
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/fingerprint.py b/env-llmeval/lib/python3.10/site-packages/datasets/fingerprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..b26caff328bd799c508641fd7289c8c01a28d5f8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/fingerprint.py
@@ -0,0 +1,494 @@
+import inspect
+import os
+import random
+import shutil
+import tempfile
+import weakref
+from functools import wraps
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import xxhash
+
+from . import config
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH
+from .utils._dill import dumps
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+ from .arrow_dataset import Dataset
+
+
+logger = get_logger(__name__)
+
+
+# Fingerprinting makes it possible to have one deterministic fingerprint per dataset state.
+# A dataset fingerprint is updated after each transform.
+# Re-running the same transforms on a dataset in a different session results in the same fingerprint.
+# This is possible thanks to a custom hashing function that works with most python objects.
+
+# Fingerprinting is the main mechanism that enables caching.
+# The caching mechanism allows reloading an existing cache file if it has already been computed.
+
+
+#################
+# Caching
+#################
+
+_CACHING_ENABLED = True
+_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional["_TempCacheDir"] = None
+_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None
+
+
+class _TempCacheDir:
+ """
+ A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
+ before deleting the directory itself to avoid permission errors on Windows.
+ """
+
+ def __init__(self):
+ self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
+ self._finalizer = weakref.finalize(self, self._cleanup)
+
+ def _cleanup(self):
+ for dset in get_datasets_with_cache_file_in_temp_dir():
+ dset.__del__()
+ if os.path.exists(self.name):
+ try:
+ shutil.rmtree(self.name)
+ except Exception as e:
+ raise OSError(
+ f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually."
+ ) from e
+
+ def cleanup(self):
+ if self._finalizer.detach():
+ self._cleanup()
+
+
+def maybe_register_dataset_for_temp_dir_deletion(dataset):
+ """
+ This function registers the datasets that have cache files in _TEMP_DIR_FOR_TEMP_CACHE_FILES in order
+ to properly delete them before deleting the temporary directory.
+ The temporary directory _TEMP_DIR_FOR_TEMP_CACHE_FILES is used when caching is disabled.
+ """
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
+ return
+
+ global _DATASETS_WITH_TABLE_IN_TEMP_DIR
+ if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None:
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet()
+ if any(
+ Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file["filename"]).parents
+ for cache_file in dataset.cache_files
+ ):
+ _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset)
+
+
+def get_datasets_with_cache_file_in_temp_dir():
+ return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else []
+
+
+def enable_caching():
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows reloading an existing cache file if it has already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
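+
+ Example (illustrative):
+
+ ```py
+ >>> from datasets import enable_caching, is_caching_enabled
+ >>> enable_caching()
+ >>> is_caching_enabled()
+ True
+ ```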
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = True
+
+
+def disable_caching():
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows reloading an existing cache file if it has already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
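+
+ Example (illustrative):
+
+ ```py
+ >>> from datasets import disable_caching, is_caching_enabled
+ >>> disable_caching()
+ >>> is_caching_enabled()
+ False
+ ```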
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = False
+
+
+@deprecated(
+ "Use datasets.enable_caching() or datasets.disable_caching() instead. This function will be removed in a future version of datasets."
+)
+def set_caching_enabled(boolean: bool):
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows reloading an existing cache file if it has already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use :func:`datasets.Dataset.save_to_disk` to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect :func:`datasets.load_dataset`. If you want to regenerate a dataset from scratch you should use
+ the ``download_mode`` parameter in :func:`datasets.load_dataset`.
+ """
+ global _CACHING_ENABLED
+ _CACHING_ENABLED = bool(boolean)
+
+
+def is_caching_enabled() -> bool:
+ """
+ When applying transforms on a dataset, the data are stored in cache files.
+ The caching mechanism allows reloading an existing cache file if it has already been computed.
+
+ Reloading a dataset is possible since the cache files are named using the dataset fingerprint, which is updated
+ after each transform.
+
+ If disabled, the library will no longer reload cached datasets files when applying transforms to the datasets.
+ More precisely, if the caching is disabled:
+ - cache files are always recreated
+ - cache files are written to a temporary directory that is deleted when session closes
+ - cache files are named using a random hash instead of the dataset fingerprint
+ - use [`~datasets.Dataset.save_to_disk`] to save a transformed dataset or it will be deleted when session closes
+ - caching doesn't affect [`~datasets.load_dataset`]. If you want to regenerate a dataset from scratch you should use
+ the `download_mode` parameter in [`~datasets.load_dataset`].
+ """
+ global _CACHING_ENABLED
+ return bool(_CACHING_ENABLED)
+
+
+def get_temporary_cache_files_directory() -> str:
+ """Return a directory that is deleted when session closes."""
+ global _TEMP_DIR_FOR_TEMP_CACHE_FILES
+ if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None:
+ _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir()
+ return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name
+
+
+#################
+# Hashing
+#################
+
+
+@deprecated("Use `copyreg.pickle` to register a custom reducer.")
+def hashregister(*types):
+ def proxy(func):
+ for t in types:
+ Hasher.dispatch[t] = func
+ return func
+
+ return proxy
+
+
+class Hasher:
+ """Hasher that accepts python objects as inputs."""
+
+ dispatch: Dict = {}
+
+ def __init__(self):
+ self.m = xxhash.xxh64()
+
+ @classmethod
+ def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
+ value = [value] if isinstance(value, bytes) else value
+ m = xxhash.xxh64()
+ for x in value:
+ m.update(x)
+ return m.hexdigest()
+
+ @classmethod
+ @deprecated("Use `Hasher.hash` instead.")
+ def hash_default(cls, value: Any) -> str:
+ return cls.hash(value)
+
+ @classmethod
+ def hash(cls, value: Any) -> str:
+ return cls.hash_bytes(dumps(value))
+
+ def update(self, value: Any) -> None:
+ header_for_update = f"=={type(value)}=="
+ value_for_update = self.hash(value)
+ self.m.update(header_for_update.encode("utf8"))
+ self.m.update(value_for_update.encode("utf-8"))
+
+ def hexdigest(self) -> str:
+ return self.m.hexdigest()
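+
+ # Illustrative usage of `Hasher` (comments only; nothing here is executed at import time):
+ #
+ #   Hasher.hash({"a": 1})        # one-shot deterministic digest of a picklable object
+ #
+ #   h = Hasher()                 # incremental variant, as used for fingerprints
+ #   h.update("transform_name")
+ #   h.update({"batched": True})
+ #   digest = h.hexdigest()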
+
+
+#################
+# Fingerprinting
+#################
+
+fingerprint_rng = random.Random()
+# we show a warning only once when fingerprinting fails to avoid spam
+fingerprint_warnings: Dict[str, bool] = {}
+
+
+def generate_fingerprint(dataset: "Dataset") -> str:
+ state = dataset.__dict__
+ hasher = Hasher()
+ for key in sorted(state):
+ if key == "_fingerprint":
+ continue
+ hasher.update(key)
+ hasher.update(state[key])
+ # hash data files last modification timestamps as well
+ for cache_file in dataset.cache_files:
+ hasher.update(os.path.getmtime(cache_file["filename"]))
+ return hasher.hexdigest()
+
+
+def generate_random_fingerprint(nbits: int = 64) -> str:
+ return f"{fingerprint_rng.getrandbits(nbits):0{nbits//4}x}"
+
+
+def update_fingerprint(fingerprint, transform, transform_args):
+ global fingerprint_warnings
+ hasher = Hasher()
+ hasher.update(fingerprint)
+ try:
+ hasher.update(transform)
+ except: # noqa various errors might raise here from pickle or dill
+ if _CACHING_ENABLED:
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+ logger.warning(
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. "
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
+ )
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+ else:
+ logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.")
+ else:
+ logger.info(
+ f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+ )
+
+ return generate_random_fingerprint()
+ for key in sorted(transform_args):
+ hasher.update(key)
+ try:
+ hasher.update(transform_args[key])
+ except: # noqa various errors might raise here from pickle or dill
+ if _CACHING_ENABLED:
+ if not fingerprint_warnings.get("update_fingerprint_transform_hash_failed", False):
+ logger.warning(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. "
+ "Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. "
+ "If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. "
+ "This warning is only showed once. Subsequent hashing failures won't be showed."
+ )
+ fingerprint_warnings["update_fingerprint_transform_hash_failed"] = True
+ else:
+ logger.info(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead."
+ )
+ else:
+ logger.info(
+ f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled."
+ )
+ return generate_random_fingerprint()
+ return hasher.hexdigest()
+
+
+def validate_fingerprint(fingerprint: str, max_length=64):
+ """
+ Make sure the fingerprint is a non-empty string that is not longer than `max_length` (64 by default),
+ so that the fingerprint can be used to name cache files without issues.
+ """
+ if not isinstance(fingerprint, str) or not fingerprint:
+ raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.")
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in fingerprint:
+ raise ValueError(
+ f"Invalid fingerprint. Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. "
+ f"They could create issues when creating cache files."
+ )
+ if len(fingerprint) > max_length:
+ raise ValueError(
+ f"Invalid fingerprint. Maximum lenth is {max_length} but '{fingerprint}' has length {len(fingerprint)}."
+ "It could create issues when creating cache files."
+ )
+
+
+def format_transform_for_fingerprint(func: Callable, version: Optional[str] = None) -> str:
+ """
+ Format a transform to the format that will be used to update the fingerprint.
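+
+ Example (illustrative):
+
+ ```py
+ >>> format_transform_for_fingerprint(len)
+ 'builtins.len'
+ >>> format_transform_for_fingerprint(len, version="1.0.0")
+ 'builtins.len@1.0.0'
+ ```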
+ """
+ transform = f"{func.__module__}.{func.__qualname__}"
+ if version is not None:
+ transform += f"@{version}"
+ return transform
+
+
+def format_kwargs_for_fingerprint(
+ func: Callable,
+ args: Tuple,
+ kwargs: Dict[str, Any],
+ use_kwargs: Optional[List[str]] = None,
+ ignore_kwargs: Optional[List[str]] = None,
+ randomized_function: bool = False,
+) -> Dict[str, Any]:
+ """
+ Format the kwargs of a transform to the format that will be used to update the fingerprint.
+ """
+ kwargs_for_fingerprint = kwargs.copy()
+ if args:
+ params = [p.name for p in inspect.signature(func).parameters.values() if p.kind != p.VAR_KEYWORD]  # exclude **kwargs
+ args = args[1:] # assume the first argument is the dataset
+ params = params[1:]
+ kwargs_for_fingerprint.update(zip(params, args))
+ else:
+ del kwargs_for_fingerprint[
+ next(iter(inspect.signature(func).parameters))
+ ] # assume the first key is the dataset
+
+ # keep the right kwargs to be hashed to generate the fingerprint
+
+ if use_kwargs:
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k in use_kwargs}
+ if ignore_kwargs:
+ kwargs_for_fingerprint = {k: v for k, v in kwargs_for_fingerprint.items() if k not in ignore_kwargs}
+ if randomized_function: # randomized functions have `seed` and `generator` parameters
+ if kwargs_for_fingerprint.get("seed") is None and kwargs_for_fingerprint.get("generator") is None:
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ kwargs_for_fingerprint["generator"] = np.random.default_rng(seed)
+
+ # remove kwargs that are the default values
+
+ default_values = {
+ p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty
+ }
+ for default_varname, default_value in default_values.items():
+ if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value:
+ kwargs_for_fingerprint.pop(default_varname)
+ return kwargs_for_fingerprint
+
+
+def fingerprint_transform(
+ inplace: bool,
+ use_kwargs: Optional[List[str]] = None,
+ ignore_kwargs: Optional[List[str]] = None,
+ fingerprint_names: Optional[List[str]] = None,
+ randomized_function: bool = False,
+ version: Optional[str] = None,
+):
+ """
+ Wrapper for dataset transforms to update the dataset fingerprint using ``update_fingerprint``
+ Args:
+ inplace (:obj:`bool`): If inplace is True, the fingerprint of the dataset is updated inplace.
+ Otherwise, a parameter "new_fingerprint" is passed to the wrapped method that should take care of
+ setting the fingerprint of the returned Dataset.
+ use_kwargs (:obj:`List[str]`, optional): optional whitelist of argument names to take into account
+ when updating the fingerprint. By default all the arguments are used.
+ ignore_kwargs (:obj:`List[str]`, optional): optional blacklist of argument names to ignore
+ when updating the fingerprint. Note that `ignore_kwargs` takes precedence over `use_kwargs`.
+ fingerprint_names (:obj:`List[str]`, optional, defaults to ["new_fingerprint"]):
+ If the dataset transform is not inplace and returns a DatasetDict, then it can require
+ several fingerprints (one per dataset in the DatasetDict). By specifying fingerprint_names,
+ one fingerprint named after each element of fingerprint_names is going to be passed.
+ randomized_function (:obj:`bool`, defaults to False): If the dataset transform is random and has
+ optional parameters "seed" and "generator", then you can set randomized_function to True.
+ This way, even if users set "seed" and "generator" to None, then the fingerprint is
+ going to be randomly generated depending on numpy's current state. In this case, the
+ generator is set to np.random.default_rng(np.random.get_state()[1][0]).
+ version (:obj:`str`, optional): version of the transform. The version is taken into account when
+ computing the fingerprint. If a dataset transform changes (or at least if the output data
+ that are cached change), then one should increase the version. If the version stays the
+ same, then old cached data that are not compatible with the new transform could be reused.
+ It should be in the format "MAJOR.MINOR.PATCH".
+ """
+
+ if use_kwargs is not None and not isinstance(use_kwargs, list):
+ raise ValueError(f"use_kwargs is supposed to be a list, not {type(use_kwargs)}")
+
+ if ignore_kwargs is not None and not isinstance(ignore_kwargs, list):
+ raise ValueError(f"ignore_kwargs is supposed to be a list, not {type(use_kwargs)}")
+
+ if inplace and fingerprint_names:
+ raise ValueError("fingerprint_names are only used when inplace is False")
+
+ fingerprint_names = fingerprint_names if fingerprint_names is not None else ["new_fingerprint"]
+
+ def _fingerprint(func):
+ if not inplace and not all(name in func.__code__.co_varnames for name in fingerprint_names):
+ raise ValueError(f"function {func} is missing parameters {fingerprint_names} in signature")
+
+ if randomized_function: # randomized functions have seed and generator parameters
+ if "seed" not in func.__code__.co_varnames:
+ raise ValueError(f"'seed' must be in {func}'s signature")
+ if "generator" not in func.__code__.co_varnames:
+ raise ValueError(f"'generator' must be in {func}'s signature")
+ # this call has to be outside the wrapper, since __qualname__ changes in multiprocessing
+ transform = format_transform_for_fingerprint(func, version=version)
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(
+ func,
+ args,
+ kwargs,
+ use_kwargs=use_kwargs,
+ ignore_kwargs=ignore_kwargs,
+ randomized_function=randomized_function,
+ )
+
+ if args:
+ dataset: Dataset = args[0]
+ args = args[1:]
+ else:
+ dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters)))
+
+ # compute new_fingerprint and add it to the args of non-inplace transforms
+ if inplace:
+ new_fingerprint = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ for fingerprint_name in fingerprint_names: # transforms like `train_test_split` have several hashes
+ if kwargs.get(fingerprint_name) is None:
+ kwargs_for_fingerprint["fingerprint_name"] = fingerprint_name
+ kwargs[fingerprint_name] = update_fingerprint(
+ dataset._fingerprint, transform, kwargs_for_fingerprint
+ )
+ else:
+ validate_fingerprint(kwargs[fingerprint_name])
+
+ # Call actual function
+
+ out = func(dataset, *args, **kwargs)
+
+ # Update fingerprint of in-place transforms + update in-place history of transforms
+
+ if inplace: # update after calling func so that the fingerprint doesn't change if the function fails
+ dataset._fingerprint = new_fingerprint
+
+ return out
+
+ wrapper._decorator_name_ = "fingerprint"
+ return wrapper
+
+ return _fingerprint
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/info.py b/env-llmeval/lib/python3.10/site-packages/datasets/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..74e9a962a0cc4cf1d6b89728fa35c164a7caa93b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/info.py
@@ -0,0 +1,592 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetInfo and MetricInfo record information we know about a dataset and a metric.
+
+This includes things that we know about the dataset statically, i.e.:
+ - description
+ - canonical location
+ - does it have validation and tests splits
+ - size
+ - etc.
+
+This also includes the things that can and should be computed once we've
+processed the dataset as well:
+ - number of examples (in each split)
+ - etc.
+"""
+
+import copy
+import dataclasses
+import json
+import os
+import posixpath
+import warnings
+from dataclasses import dataclass
+from pathlib import Path
+from typing import ClassVar, Dict, List, Optional, Union
+
+import fsspec
+from huggingface_hub import DatasetCard, DatasetCardData
+
+from . import config
+from .features import Features, Value
+from .splits import SplitDict
+from .tasks import TaskTemplate, task_template_from_dict
+from .utils import Version
+from .utils.logging import get_logger
+from .utils.py_utils import asdict, unique_values
+
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class SupervisedKeysData:
+ input: str = ""
+ output: str = ""
+
+
+@dataclass
+class DownloadChecksumsEntryData:
+ key: str = ""
+ value: str = ""
+
+
+class MissingCachedSizesConfigError(Exception):
+ """The expected cached sizes of the download file are missing."""
+
+
+class NonMatchingCachedSizesError(Exception):
+ """The prepared split doesn't have expected sizes."""
+
+
+@dataclass
+class PostProcessedInfo:
+ features: Optional[Features] = None
+ resources_checksums: Optional[dict] = None
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+
+ @classmethod
+ def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
+
+
+@dataclass
+class DatasetInfo:
+ """Information about a dataset.
+
+ `DatasetInfo` documents a dataset, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Not all fields are known on construction and may be updated later.
+
+ Attributes:
+ description (`str`):
+ A description of the dataset.
+ citation (`str`):
+ A BibTeX citation of the dataset.
+ homepage (`str`):
+ A URL to the official homepage for the dataset.
+ license (`str`):
+ The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
+ features ([`Features`], *optional*):
+ The features used to specify the dataset's column types.
+ post_processed (`PostProcessedInfo`, *optional*):
+ Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
+ supervised_keys (`SupervisedKeysData`, *optional*):
+ Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
+ builder_name (`str`, *optional*):
+ The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
+ config_name (`str`, *optional*):
+ The name of the configuration derived from [`BuilderConfig`].
+ version (`str` or [`Version`], *optional*):
+ The version of the dataset.
+ splits (`dict`, *optional*):
+ The mapping between split name and metadata.
+ download_checksums (`dict`, *optional*):
+ The mapping between the URLs of the files downloaded to generate the dataset and their corresponding checksums and sizes.
+ download_size (`int`, *optional*):
+ The size of the files to download to generate the dataset, in bytes.
+ post_processing_size (`int`, *optional*):
+ Size of the dataset in bytes after post-processing, if any.
+ dataset_size (`int`, *optional*):
+ The combined size in bytes of the Arrow tables for all splits.
+ size_in_bytes (`int`, *optional*):
+ The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
+ task_templates (`List[TaskTemplate]`, *optional*):
+ The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
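+
+ Example (illustrative):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")  # doctest: +SKIP
+ >>> ds.info.features  # doctest: +SKIP
+ ```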
+ """
+
+ # Set in the dataset scripts
+ description: str = dataclasses.field(default_factory=str)
+ citation: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ features: Optional[Features] = None
+ post_processed: Optional[PostProcessedInfo] = None
+ supervised_keys: Optional[SupervisedKeysData] = None
+ task_templates: Optional[List[TaskTemplate]] = None
+
+ # Set later by the builder
+ builder_name: Optional[str] = None
+ dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
+ config_name: Optional[str] = None
+ version: Optional[Union[str, Version]] = None
+ # Set later by `download_and_prepare`
+ splits: Optional[dict] = None
+ download_checksums: Optional[dict] = None
+ download_size: Optional[int] = None
+ post_processing_size: Optional[int] = None
+ dataset_size: Optional[int] = None
+ size_in_bytes: Optional[int] = None
+
+ _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
+ "config_name",
+ "download_size",
+ "dataset_size",
+ "features",
+ "splits",
+ ]
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+ if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
+ self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
+ if self.version is not None and not isinstance(self.version, Version):
+ if isinstance(self.version, str):
+ self.version = Version(self.version)
+ else:
+ self.version = Version.from_dict(self.version)
+ if self.splits is not None and not isinstance(self.splits, SplitDict):
+ self.splits = SplitDict.from_split_dict(self.splits)
+ if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
+ if isinstance(self.supervised_keys, (tuple, list)):
+ self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
+ else:
+ self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
+
+ # Parse and make a list of templates
+ if self.task_templates is not None:
+ if isinstance(self.task_templates, (list, tuple)):
+ templates = [
+ template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
+ for template in self.task_templates
+ ]
+ self.task_templates = [template for template in templates if template is not None]
+ elif isinstance(self.task_templates, TaskTemplate):
+ self.task_templates = [self.task_templates]
+ else:
+ template = task_template_from_dict(self.task_templates)
+ self.task_templates = [template] if template is not None else []
+
+ # Align task templates with features
+ if self.task_templates is not None:
+ self.task_templates = list(self.task_templates)
+ if self.features is not None:
+ self.task_templates = [
+ template.align_with_features(self.features) for template in (self.task_templates)
+ ]
+
+ def write_to_directory(
+ self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
+ ):
+ """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
+
+ Args:
+ dataset_info_dir (`str`):
+ Destination directory.
+ pretty_print (`bool`, defaults to `False`):
+ If `True`, the JSON will be pretty-printed with the indent level of 4.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ <Deprecated version="2.9.0">
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ </Deprecated>
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ <Added version="2.9.0"/>
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
+ self._dump_info(f, pretty_print=pretty_print)
+ if self.license:
+ with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
+ self._dump_license(f)
+
+ def _dump_info(self, file, pretty_print=False):
+ """Dump info in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
+
+ def _dump_license(self, file):
+ """Dump license in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(self.license.encode("utf-8"))
+
+ @classmethod
+ def from_merge(cls, dataset_infos: List["DatasetInfo"]):
+ dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
+
+ if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
+ # if all dataset_infos are equal we don't need to merge. Just return the first.
+ return dataset_infos[0]
+
+ description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
+ citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
+ homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
+ license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
+ features = None
+ supervised_keys = None
+ task_templates = None
+
+ # Find common task templates across all dataset infos
+ all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
+ if len(all_task_templates) > 1:
+ task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
+ elif len(all_task_templates):
+ task_templates = list(set(all_task_templates[0]))
+ # If no common task templates found, replace empty list with None
+ task_templates = task_templates if task_templates else None
+
+ return cls(
+ description=description,
+ citation=citation,
+ homepage=homepage,
+ license=license,
+ features=features,
+ supervised_keys=supervised_keys,
+ task_templates=task_templates,
+ )
+
+ @classmethod
+ def from_directory(
+ cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
+ ) -> "DatasetInfo":
+ """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
+
+ This function updates all the dynamically generated fields (num_examples,
+ hash, time of creation,...) of the [`DatasetInfo`].
+
+ This will overwrite all previous metadata.
+
+ Args:
+ dataset_info_dir (`str`):
+ The directory containing the metadata file. This
+ should be the root directory of a specific dataset version.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ <Deprecated version="2.9.0">
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ </Deprecated>
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ <Added version="2.9.0"/>
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetInfo
+ >>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
+ logger.info(f"Loading Dataset info from {dataset_info_dir}")
+ if not dataset_info_dir:
+ raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
+ dataset_info_dict = json.load(f)
+ return cls.from_dict(dataset_info_dict)
+
+ @classmethod
+ def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
+
+ def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
+ self_dict = self.__dict__
+ self_dict.update(
+ **{
+ k: copy.deepcopy(v)
+ for k, v in other_dataset_info.__dict__.items()
+ if (v is not None or not ignore_none)
+ }
+ )
+
+ def copy(self) -> "DatasetInfo":
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+ def _to_yaml_dict(self) -> dict:
+ yaml_dict = {}
+ dataset_info_dict = asdict(self)
+ for key in dataset_info_dict:
+ if key in self._INCLUDED_INFO_IN_YAML:
+ value = getattr(self, key)
+ if hasattr(value, "_to_yaml_list"): # Features, SplitDict
+ yaml_dict[key] = value._to_yaml_list()
+ elif hasattr(value, "_to_yaml_string"): # Version
+ yaml_dict[key] = value._to_yaml_string()
+ else:
+ yaml_dict[key] = value
+ return yaml_dict
+
+ @classmethod
+ def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
+ yaml_data = copy.deepcopy(yaml_data)
+ if yaml_data.get("features") is not None:
+ yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
+ if yaml_data.get("splits") is not None:
+ yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
+
+
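+# Illustrative sketch, not part of the library: building a `DatasetInfo` by hand and serializing it.
+# `_to_yaml_dict` only keeps the fields listed in `_INCLUDED_INFO_IN_YAML`, while `from_dict`
+# silently drops unknown keys. The helper name `_example_dataset_info_usage` is hypothetical.
+def _example_dataset_info_usage():
+    info = DatasetInfo(
+        description="A toy dataset used only for illustration.",
+        features=Features({"text": Value("string"), "label": Value("int64")}),
+        version="1.0.0",  # converted to a `Version` object by `__post_init__`
+    )
+    yaml_dict = info._to_yaml_dict()
+    assert "features" in yaml_dict and "description" not in yaml_dict
+    # unknown keys are ignored when reloading from a plain dict
+    reloaded = DatasetInfo.from_dict({"description": info.description, "unknown_key": 1})
+    return yaml_dict, reloaded
+
+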
+class DatasetInfosDict(Dict[str, DatasetInfo]):
+ def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
+ total_dataset_infos = {}
+ dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
+ dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
+ if not overwrite:
+ total_dataset_infos = self.from_directory(dataset_infos_dir)
+ total_dataset_infos.update(self)
+ if os.path.exists(dataset_infos_path):
+ # for backward compatibility, let's update the JSON file if it exists
+ with open(dataset_infos_path, "w", encoding="utf-8") as f:
+ dataset_infos_dict = {
+ config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
+ }
+ json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
+ # Dump the infos in the YAML part of the README.md file
+ if os.path.exists(dataset_readme_path):
+ dataset_card = DatasetCard.load(dataset_readme_path)
+ dataset_card_data = dataset_card.data
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ if total_dataset_infos:
+ total_dataset_infos.to_dataset_card_data(dataset_card_data)
+ dataset_card = (
+ DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
+ )
+ dataset_card.save(Path(dataset_readme_path))
+
+ @classmethod
+ def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
+ logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
+ # Load the info from the YAML part of README.md
+ if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
+ dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
+ if "dataset_info" in dataset_card_data:
+ return cls.from_dataset_card_data(dataset_card_data)
+ if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
+ # this is just to have backward compatibility with dataset_infos.json files
+ with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ return cls(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ else:
+ return cls()
+
+ @classmethod
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
+ if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
+ if isinstance(dataset_card_data["dataset_info"], list):
+ return cls(
+ {
+ dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
+ dataset_info_yaml_dict
+ )
+ for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
+ }
+ )
+ else:
+ dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
+ dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
+ return cls({dataset_info.config_name: dataset_info})
+ else:
+ return cls()
+
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+ if self:
+ # first get existing metadata info
+ if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
+ dataset_metadata_infos = {
+ dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
+ }
+ elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
+ dataset_metadata_infos = {
+ config_metadata["config_name"]: config_metadata
+ for config_metadata in dataset_card_data["dataset_info"]
+ }
+ else:
+ dataset_metadata_infos = {}
+ # update/rewrite existing metadata info with the one to dump
+ total_dataset_infos = {
+ **dataset_metadata_infos,
+ **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
+ }
+ # the config_name from the dataset_infos_dict takes precedence over the config_name of the DatasetInfo
+ for config_name, dset_info_yaml_dict in total_dataset_infos.items():
+ dset_info_yaml_dict["config_name"] = config_name
+ if len(total_dataset_infos) == 1:
+ # use a struct instead of a list of configurations, since there's only one
+ dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
+ config_name = dataset_card_data["dataset_info"].pop("config_name", None)
+ if config_name != "default":
+ # if config_name is not "default" preserve it and put at the first position
+ dataset_card_data["dataset_info"] = {
+ "config_name": config_name,
+ **dataset_card_data["dataset_info"],
+ }
+ else:
+ dataset_card_data["dataset_info"] = []
+ for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
+ # add the config_name field in first position
+ dataset_info_yaml_dict.pop("config_name", None)
+ dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
+ dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
+
+
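+# Hedged illustration, not part of the library: how `DatasetInfosDict` exports its metadata into the
+# YAML block of a dataset card. With a single config named "default", the exported struct drops the
+# "config_name" key. The helper name `_example_to_dataset_card_data` is hypothetical.
+def _example_to_dataset_card_data():
+    infos = DatasetInfosDict({"default": DatasetInfo(description="toy", dataset_size=123)})
+    card_data = DatasetCardData()
+    infos.to_dataset_card_data(card_data)
+    assert "config_name" not in card_data["dataset_info"]
+    return card_data
+
+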
+@dataclass
+class MetricInfo:
+ """Information about a metric.
+
+ `MetricInfo` documents a metric, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Note: Not all fields are known on construction and may be updated later.
+ """
+
+ # Set in the dataset scripts
+ description: str
+ citation: str
+ features: Features
+ inputs_description: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ codebase_urls: List[str] = dataclasses.field(default_factory=list)
+ reference_urls: List[str] = dataclasses.field(default_factory=list)
+ streamable: bool = False
+ format: Optional[str] = None
+
+ # Set later by the builder
+ metric_name: Optional[str] = None
+ config_name: Optional[str] = None
+ experiment_id: Optional[str] = None
+
+ def __post_init__(self):
+ if self.format is not None:
+ for key, value in self.features.items():
+ if not isinstance(value, Value):
+ raise ValueError(
+ f"When using 'numpy' format, all features should be a `datasets.Value` feature. "
+ f"Here {key} is an instance of {value.__class__.__name__}"
+ )
+
+ def write_to_directory(self, metric_info_dir, pretty_print=False):
+ """Write `MetricInfo` as JSON to `metric_info_dir`.
+ Also save the license separately in LICENSE.
+ If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
+ json.dump(asdict(self), f, indent=4 if pretty_print else None)
+
+ if self.license:
+ with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
+ f.write(self.license)
+
+ @classmethod
+ def from_directory(cls, metric_info_dir) -> "MetricInfo":
+ """Create MetricInfo from the JSON file in `metric_info_dir`.
+
+ Args:
+ metric_info_dir: `str` The directory containing the metadata file. This
+ should be the root directory of a specific metric version.
+
+ Example:
+
+ ```py
+ >>> from datasets import MetricInfo
+ >>> metric_info = MetricInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ logger.info(f"Loading Metric info from {metric_info_dir}")
+ if not metric_info_dir:
+ raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
+
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
+ metric_info_dict = json.load(f)
+ return cls.from_dict(metric_info_dict)
+
+ @classmethod
+ def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
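+
+
+# Illustrative sketch only: `MetricInfo` requires a description, a citation and features, and when a
+# `format` is set, `__post_init__` checks that every feature is a `Value`. The helper name
+# `_example_metric_info` is hypothetical.
+def _example_metric_info():
+    return MetricInfo(
+        description="Toy accuracy-style metric, for illustration only.",
+        citation="",
+        features=Features({"predictions": Value("int64"), "references": Value("int64")}),
+        format="numpy",
+    )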
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/inspect.py b/env-llmeval/lib/python3.10/site-packages/datasets/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..f976073ac977f04c2023139685d245e8bda58b90
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/inspect.py
@@ -0,0 +1,581 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""List and inspect datasets."""
+
+import inspect
+import os
+import shutil
+import warnings
+from pathlib import Path, PurePath
+from typing import Dict, List, Mapping, Optional, Sequence, Union
+
+import huggingface_hub
+
+from . import config
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager
+from .info import DatasetInfo
+from .load import (
+ dataset_module_factory,
+ get_dataset_builder_class,
+ import_main_class,
+ load_dataset_builder,
+ metric_module_factory,
+)
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import relative_to_absolute_path
+from .utils.logging import get_logger
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+
+class SplitsNotFoundError(ValueError):
+ pass
+
+
+@deprecated("Use 'huggingface_hub.list_datasets' instead.")
+def list_datasets(with_community_datasets=True, with_details=False):
+ """List all the datasets scripts available on the Hugging Face Hub.
+
+ Args:
+ with_community_datasets (`bool`, *optional*, defaults to `True`):
+ Include the community provided datasets.
+ with_details (`bool`, *optional*, defaults to `False`):
+ Return the full details on the datasets instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_datasets
+ >>> list_datasets()
+ ['acronym_identification',
+ 'ade_corpus_v2',
+ 'adversarial_qa',
+ 'aeslc',
+ 'afrikaans_ner_corpus',
+ 'ag_news',
+ ...
+ ]
+ ```
+ """
+ datasets = huggingface_hub.list_datasets(full=with_details)
+ if not with_community_datasets:
+ datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+ if not with_details:
+ datasets = [dataset.id for dataset in datasets]
+ return list(datasets)
+
+
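+# Hedged sketch, not part of the library: since `list_datasets` is deprecated, new code would call
+# `huggingface_hub.list_datasets` directly. This mirrors `list_datasets(with_community_datasets=False)`;
+# the helper name is made up for this example.
+def _example_list_canonical_datasets():
+    return [ds.id for ds in huggingface_hub.list_datasets() if "/" not in ds.id]
+
+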
+@deprecated(
+ "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def list_metrics(with_community_metrics=True, with_details=False):
+ """List all the metrics script available on the Hugging Face Hub.
+
+
+
+ Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+ with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_metrics
+ >>> list_metrics()
+ ['accuracy',
+ 'bertscore',
+ 'bleu',
+ 'bleurt',
+ 'cer',
+ 'chrf',
+ ...
+ ]
+ ```
+ """
+ metrics = huggingface_hub.list_metrics()
+ if not with_community_metrics:
+ metrics = [metric for metric in metrics if "/" not in metric.id]
+ if not with_details:
+ metrics = [metric.id for metric in metrics]
+ return metrics
+
+
+@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ """
+ Allow inspection/modification of a dataset script by copying it on the local drive at `local_path`.
+
+ Args:
+ path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name
+ as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+ local_path (`str`):
+ Path to the local folder to copy the dataset script to.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ **download_kwargs (additional keyword arguments):
+ Optional arguments for [`DownloadConfig`] which will override
+ the attributes of `download_config` if supplied.
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ if os.path.isfile(path):
+ path = str(Path(path).parent)
+ if os.path.isdir(path):
+ shutil.copytree(path, local_path, dirs_exist_ok=True)
+ else:
+ huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
+ repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
+ )
+ print(
+ f"The dataset {path} can be inspected at {local_path}. "
+ f'You can modify this loading script if it has one and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+@deprecated(
+ "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ r"""
+ Allow inspection/modification of a metric script by copying it on the local drive at `local_path`.
+
+
+
+ Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+ local_path (``str``): path to the local folder to copy the dataset script to.
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
+ """
+ metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
+ metric_cls = import_main_class(metric_module.module_path, dataset=False)
+ module_source_path = inspect.getsourcefile(metric_cls)
+ module_source_dirpath = os.path.dirname(module_source_path)
+ for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
+ dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
+ os.makedirs(dst_dirpath, exist_ok=True)
+ # skipping hidden directories; prune the search
+ dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
+ for filename in filenames:
+ shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
+ shutil.copystat(dirpath, dst_dirpath)
+ local_path = relative_to_absolute_path(local_path)
+ print(
+ f"The processing scripts for metric {path} can be inspected at {local_path}. "
+ f"The main class is in {module_source_dirpath}. "
+ f'You can modify this processing script and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+def get_dataset_infos(
+ path: str,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_infos
+ >>> get_dataset_infos('rotten_tomatoes')
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ config_names = get_dataset_config_names(
+ path=path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_files=data_files,
+ token=token,
+ )
+ return {
+ config_name: get_dataset_config_info(
+ path=path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ for config_name in config_names
+ }
+
+
+def get_dataset_config_names(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+):
+ """Get the list of available config names for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_config_names
+ >>> get_dataset_config_names("glue")
+ ['cola',
+ 'sst2',
+ 'mrpc',
+ 'qqp',
+ 'stsb',
+ 'mnli',
+ 'mnli_mismatched',
+ 'mnli_matched',
+ 'qnli',
+ 'rte',
+ 'wnli',
+ 'ax']
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ return list(builder_cls.builder_configs.keys()) or [
+ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
+ ]
+
+
+def get_dataset_default_config_name(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+) -> Optional[str]:
+ """Get the default config name for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Returns:
+ Optional[str]
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_default_config_name
+ >>> get_dataset_default_config_name("openbookqa")
+ 'main'
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ builder_configs = list(builder_cls.builder_configs.keys())
+ if builder_configs:
+ default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
+ else:
+ default_config_name = "default"
+ return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
+
+
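+# Minimal sketch of the decision rule implemented above, without any Hub access: given the available
+# configuration names and an optional `DEFAULT_CONFIG_NAME`, resolve the default config. The helper
+# `_resolve_default_config_name` is illustrative and not part of the public API.
+def _resolve_default_config_name(config_names: List[str], default_config_name: Optional[str]) -> Optional[str]:
+    if config_names:
+        fallback = config_names[0] if len(config_names) == 1 else None
+    else:
+        fallback = "default"
+    return default_config_name or fallback
+
+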
+def get_dataset_config_info(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+) -> DatasetInfo:
+ """Get the meta information (DatasetInfo) about a dataset for a particular config
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+ config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
+ data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
+ download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
+
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ builder = load_dataset_builder(
+ path,
+ name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ info = builder.info
+ if info.splits is None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ if token is not None:
+ download_config.token = token
+ builder._check_manual_download(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ try:
+ info.splits = {
+ split_generator.name: {"name": split_generator.name, "dataset_name": path}
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ }
+ except Exception as err:
+ raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
+ return info
+
+
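+# Usage sketch (requires network access to the Hub); the dataset id "rotten_tomatoes" and config
+# name "default" are only examples. `SplitsNotFoundError` is raised when the split names cannot be
+# parsed without downloading the data.
+def _example_get_config_info():
+    try:
+        info = get_dataset_config_info("rotten_tomatoes", config_name="default")
+        return list(info.splits) if info.splits is not None else []
+    except SplitsNotFoundError:
+        return []
+
+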
+def get_dataset_split_names(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the list of available splits for a particular config and dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ config_name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_split_names
+ >>> get_dataset_split_names('rotten_tomatoes')
+ ['train', 'validation', 'test']
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ info = get_dataset_config_info(
+ path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ return list(info.splits.keys())
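+
+
+# Combined usage sketch (requires network access): enumerate every (config, split) pair of a Hub
+# dataset. The dataset id "glue" and the helper name are only examples.
+def _example_all_config_splits(path: str = "glue"):
+    return {
+        config_name: get_dataset_split_names(path, config_name=config_name)
+        for config_name in get_dataset_config_names(path)
+    }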
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/iterable_dataset.py b/env-llmeval/lib/python3.10/site-packages/datasets/iterable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..31329de9c3183b0f913e6044c010c60206e185df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/iterable_dataset.py
@@ -0,0 +1,2388 @@
+import copy
+import itertools
+import sys
+import warnings
+from collections import Counter
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from itertools import cycle, islice
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+
+from . import config
+from .arrow_dataset import Dataset, DatasetInfoMixin
+from .features import Features
+from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
+from .filesystems import _reset_fsspec_lock
+from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
+from .info import DatasetInfo
+from .splits import NamedSplit
+from .table import cast_table_to_features, read_schema_from_file, table_cast
+from .utils.logging import get_logger
+from .utils.py_utils import Literal
+from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
+
+
+logger = get_logger(__name__)
+
+Key = Union[int, str]
+
+
+def identity_func(x):
+ return x
+
+
+def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
+ if any(col not in example for col in column_mapping):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+ )
+ if any(col in example for col in column_mapping.values()):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
+ )
+ return {
+ new_column_name: example[original_column_name]
+ for original_column_name, new_column_name in column_mapping.items()
+ }
+
+
+def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
+ if name in example:
+ raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+ return {name: column[idx]}
+
+
+def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
+ pa_table = pa.Table.from_pydict(batch)
+ if try_features is not None:
+ try:
+ pa_table = table_cast(pa_table, pa.schema(try_features.type))
+ except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
+ pass
+ return Features.from_arrow_schema(pa_table.schema)
+
+
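+# Small sketch of `_infer_features_from_batch`: without `try_features`, the column types fall back to
+# whatever Arrow infers from the python values.
+def _example_infer_features():
+    features = _infer_features_from_batch({"text": ["a", "b"], "score": [0.5, 1.0]})
+    assert features["text"].dtype == "string" and features["score"].dtype == "float64"
+    return features
+
+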
+def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
+ # we order the columns by order of appearance
+ # to do so, we use a dict as an ordered set
+ cols = {col: None for example in examples for col in example}
+ # when an example is missing a column, we set the value to None with .get()
+ arrays = [[example.get(col) for example in examples] for col in cols]
+ return dict(zip(cols, arrays))
+
+
+def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]:
+ """Convert a batch (dict of lists) into an iterator over examples (dicts mapping column name to value)."""
+ n_examples = len(batch[next(iter(batch))])
+ for i in range(n_examples):
+ yield {col: array[i] for col, array in batch.items()}
+
+
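+# Illustrative round-trip between the two helpers above: `_examples_to_batch` pads missing columns
+# with None, and `_batch_to_examples` yields one dict per row.
+def _example_batch_roundtrip():
+    examples = [{"text": "a", "label": 0}, {"text": "b"}]  # the second example has no "label"
+    batch = _examples_to_batch(examples)
+    assert batch == {"text": ["a", "b"], "label": [0, None]}
+    assert list(_batch_to_examples(batch)) == [{"text": "a", "label": 0}, {"text": "b", "label": None}]
+
+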
+class _HasNextIterator(Iterator):
+ """Iterator with an hasnext() function. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
+
+ def __init__(self, it):
+ self.it = iter(it)
+ self._hasnext = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._hasnext:
+ result = self._thenext
+ else:
+ result = next(self.it)
+ self._hasnext = None
+ return result
+
+ def hasnext(self):
+ if self._hasnext is None:
+ try:
+ self._thenext = next(self.it)
+ except StopIteration:
+ self._hasnext = False
+ else:
+ self._hasnext = True
+ return self._hasnext
+
+
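+# Small usage sketch of `_HasNextIterator`: `hasnext()` peeks at the next element without losing it,
+# which `CyclingMultiSourcesExamplesIterable` below relies on to detect exhausted sources.
+def _example_hasnext_iterator():
+    it = _HasNextIterator(range(2))
+    values = []
+    while it.hasnext():
+        values.append(next(it))
+    assert values == [0, 1]
+
+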
+def _convert_to_arrow(
+ iterable: Iterable[Tuple[Key, dict]],
+ batch_size: int,
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Convert and group examples in Arrow tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, dict]]`):
+ An examples iterable containing tuples (example_key, example) of type (int/str, dict)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
+ """
+ if batch_size is None or batch_size <= 0:
+ yield (
+ "all",
+ pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)),
+ )
+ return
+ iterator = iter(iterable)
+ for key, example in iterator:
+ iterator_batch = islice(iterator, batch_size - 1)
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ if len(key_examples_list) < batch_size and drop_last_batch:
+ return
+ keys, examples = zip(*key_examples_list)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
+
+
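+# Illustrative check of `_convert_to_arrow`: three examples grouped with batch_size=2 yield a 2-row
+# table followed by a 1-row table (kept because drop_last_batch defaults to False), and the keys of
+# grouped examples are joined with "_".
+def _example_convert_to_arrow():
+    examples = [(i, {"id": i}) for i in range(3)]
+    tables = list(_convert_to_arrow(examples, batch_size=2))
+    assert [pa_table.num_rows for _, pa_table in tables] == [2, 1]
+    assert tables[0][0] == "0_1"
+
+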
+def _batch_arrow_tables(
+ iterable: Iterable[Tuple[Key, pa.Table]],
+ batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, pa.Table]]`):
+ A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
+ """
+ if batch_size is None or batch_size <= 0:
+ yield "all", pa.concat_tables([pa_table for _, pa_table in iterable])
+ return
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for key, pa_table in iterable:
+ for chunk in pa_table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+
+
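+# Sketch of the re-chunking behavior above: two 3-row tables re-batched with batch_size=4 produce one
+# 4-row table spanning both sources and one 2-row leftover table.
+def _example_batch_arrow_tables():
+    tables = [(i, pa.table({"id": list(range(i * 3, i * 3 + 3))})) for i in range(2)]
+    batched = list(_batch_arrow_tables(tables, batch_size=4))
+    assert [pa_table.num_rows for _, pa_table in batched] == [4, 2]
+
+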
+class _BaseExamplesIterable:
+ """Base class for the examples iterable used by an IterableDataset"""
+
+ def __init__(self) -> None:
+ self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None
+
+ def __iter__(self) -> Iterator[Tuple[Key, dict]]:
+ """An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
+ raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
+ """
+ Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
+ If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
+ """
+ raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
+
+ def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]:
+ return list(range(worker_id, self.n_shards, num_workers))
+
+ @property
+ def n_shards(self) -> int:
+ raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
+
+
+class ExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
+ super().__init__()
+ self.generate_examples_fn = generate_examples_fn
+ self.kwargs = kwargs
+
+ def __iter__(self):
+ yield from self.generate_examples_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
+ return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
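+# Hypothetical sketch of `ExamplesIterable`: the number of shards is inferred from the lists in
+# `kwargs`, and `shard_data_sources` keeps only the shards assigned to a given worker (the shard
+# file names below are made up).
+def _example_examples_iterable_sharding():
+    def generate_examples(filepaths):
+        for filepath in filepaths:
+            yield filepath, {"source": filepath}
+
+    ex_iterable = ExamplesIterable(generate_examples, {"filepaths": ["shard0.txt", "shard1.txt", "shard2.txt"]})
+    assert ex_iterable.n_shards == 3
+    worker0 = ex_iterable.shard_data_sources(worker_id=0, num_workers=2)  # keeps shards 0 and 2
+    assert [example["source"] for _, example in worker0] == ["shard0.txt", "shard2.txt"]
+
+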
+class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
+ def __init__(
+ self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
+ ):
+ super().__init__(generate_examples_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class ArrowExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
+ super().__init__()
+ self.generate_tables_fn = generate_tables_fn
+ self.kwargs = kwargs
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**self.kwargs):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ yield from self.generate_tables_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
+ return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
+ def __init__(
+ self,
+ generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
+ kwargs: dict,
+ generator: np.random.Generator,
+ ):
+ super().__init__(generate_tables_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_tables_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class SelectColumnsIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.column_names = column_names
+ if self.ex_iterable.iter_arrow:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for idx, row in self.ex_iterable:
+ yield idx, {c: row[c] for c in self.column_names}
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ for idx, pa_table in self.ex_iterable.iter_arrow():
+ yield idx, pa_table.select(self.column_names)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names)
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class StepExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.step = step
+ self.offset = offset
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterator = iter(self.ex_iterable)
+ while True:
+ batch = list(islice(ex_iterator, self.step))
+ if len(batch) > self.offset:
+ yield batch[self.offset]
+ else:
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
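+# Sketch of `StepExamplesIterable`: with step=3 and offset=1 it keeps every third example starting
+# from the second one, which is how a stream can be split across ranks or workers.
+def _example_step_examples_iterable():
+    def generate_examples():
+        for i in range(7):
+            yield i, {"value": i}
+
+    stepped = StepExamplesIterable(ExamplesIterable(generate_examples, {}), step=3, offset=1)
+    assert [example["value"] for _, example in stepped] == [1, 4]
+
+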
+class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ self.stopping_strategy = stopping_strategy
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
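+ # e.g. with two sources of 2 and 3 examples cycled round-robin:
+ #   "first_exhausted" stops after 3 examples (as soon as the 2-example source runs out),
+ #   "all_exhausted" restarts exhausted sources and stops after 6 examples (once every example of both sources has been seen at least once)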
+ # TODO(QL): implement iter_arrow
+
+ def _get_indices_iterator(self):
+ # this is an infinite iterator to keep track of which iterator we want to pick examples from
+ return cycle(range(len(self.ex_iterables)))
+
+ def __iter__(self):
+ iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
+
+ indices_iterator = self._get_indices_iterator()
+
+ is_exhausted = np.full(len(self.ex_iterables), False)
+ for i in indices_iterator:
+ try: # let's pick one example from the iterator at index i
+ yield next(iterators[i])
+
+ # it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
+ if not iterators[i].hasnext():
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criterion is met, break the main for loop
+ break
+ # otherwise reinitialise the iterator so that its examples can be yielded again (oversampling)
+ iterators[i] = _HasNextIterator(self.ex_iterables[i])
+
+ except StopIteration:
+ # here it means that the i-th iterable dataset is empty, i.e. we never have the occasion to yield an element of the i-th dataset.
+ # we still check whether the stopping criterion is met, and break out of the loop in case of an oversampling strategy
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criterion is met, break the main for loop
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
+ """Shuffle each underlying examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return CyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ stopping_strategy=self.stopping_strategy,
+ )
+
+
+class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
+ It doesn't require the examples iterables to always yield the same columns.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For reference, `IterableDataset` merges the features of all the datasets being concatenated into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables):
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable
+
+ def _iter_arrow(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable.iter_arrow()
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Shuffle the list of examples iterable, as well as each underlying examples iterable."""
+ rng = deepcopy(generator)
+ ex_iterables = list(self.ex_iterables)
+ rng.shuffle(ex_iterables)
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(
+ f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
+ )
+
+
+class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
+ It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
+ This check is done once when yielding the first example.
+
+ However, it doesn't fill missing columns with None.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For reference, `IterableDataset` merges the features of all the datasets being concatenated into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
+ for i in itertools.count():
+ keys = []
+ examples = []
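+ # iterate over a copy of the list since exhausted iterators are removed from `ex_iterators` below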
+ for ex_iterator in list(ex_iterators):
+ try:
+ key, example = next(ex_iterator)
+ keys.append(key)
+ examples.append(example)
+ except StopIteration:
+ ex_iterators.remove(ex_iterator)
+ if ex_iterators:
+ if i == 0:
+ _check_column_names([column_name for example in examples for column_name in example])
+ new_example = {}
+ for example in examples:
+ new_example.update(example)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, new_example
+ else:
+ break
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return 1
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return HorizontallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ generator: np.random.Generator,
+ probabilities: Optional[List[float]] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__(ex_iterables, stopping_strategy)
+ self.generator = deepcopy(generator)
+ self.probabilities = probabilities
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(
+ rng: np.random.Generator,
+ num_sources: int,
+ random_batch_size=1000,
+ p: Optional[List[float]] = None,
+ ) -> Iterator[int]:
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ if p is None:
+ while True:
+ yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
+ else:
+ while True:
+ yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
+
+ def _get_indices_iterator(self):
+ rng = deepcopy(self.generator)
+ # this is an infinite iterator that randomly samples the index of the source to pick examples from
+ return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Shuffle the data sources of each wrapped examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables,
+ generator=generator,
+ probabilities=self.probabilities,
+ stopping_strategy=self.stopping_strategy,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ self.generator,
+ self.probabilities,
+ self.stopping_strategy,
+ )
+
+
+class MappedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.drop_last_batch = drop_last_batch
+ self.remove_columns = remove_columns
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ if (
+ self.drop_last_batch
+ and self.batch_size is not None
+ and self.batch_size > 0
+ and len(examples) < self.batch_size
+ ): # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then apply the transform
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ transformed_batch = dict(batch) # this will be updated with the function output
+ transformed_batch.update(self.function(*function_args, **self.fn_kwargs))
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_batch[c]
+ if transformed_batch:
+ first_col = next(iter(transformed_batch))
+ bad_cols = [
+ col
+ for col in transformed_batch
+ if len(transformed_batch[col]) != len(transformed_batch[first_col])
+ ]
+ if bad_cols:
+ raise ValueError(
+ f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
+ )
+ # the new key is the concatenation of the examples keys from the batch
+ new_key = "_".join(str(key) for key in keys)
+ # yield one example at a time from the transformed batch
+ for example in _batch_to_examples(transformed_batch):
+ yield new_key, example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the transform and yield the example directly
+ # first copy the example, since we might drop some keys
+ example = dict(example)
+ example = format_dict(example) if format_dict else example
+ # then apply the transform
+ inputs = example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ transformed_example = dict(example) # this will be updated with the function output
+ transformed_example.update(self.function(*function_args, **self.fn_kwargs))
+ # then we remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_example[c]
+ yield key, transformed_example
+ current_idx += 1
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(),
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ else:
+ iterator = _convert_to_arrow(
+ self.ex_iterable,
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ output_table = self.function(*function_args, **self.fn_kwargs)
+ if not isinstance(output_table, pa.Table):
+ raise TypeError(
+ f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a pyarrow table to update the dataset."
+ )
+ # we don't need to merge results for consistency with Dataset.map which merges iff both input and output are dicts
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for column in self.remove_columns:
+ if column in output_table.column_names:
+ output_table = output_table.remove_column(output_table.column_names.index(column))
+ # return output
+ yield key, output_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
+ """Keep only the requested shard."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class FilteredExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then compute the mask for the batch
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield one example at a time from the batch
+ for key_example, to_keep in zip(key_examples_list, mask):
+ if to_keep:
+ yield key_example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the filtering function directly
+ example = dict(example)
+ inputs = format_dict(example) if format_dict else example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ to_keep = self.function(*function_args, **self.fn_kwargs)
+ if to_keep:
+ yield key, example
+ current_idx += 1
+
+ def _iter_arrow(self):
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
+ )
+ else:
+ iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield the filtered table
+ if self.batched:
+ yield key, pa_table.filter(mask)
+ elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
+ yield key, pa_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, seed: Optional[int]) -> "FilteredExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(seed),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
+ """Keep only the requested shard."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class BufferShuffledExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.buffer_size = buffer_size
+ self.generator = generator
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
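+ """Get an infinite iterator over random positions in the shuffle buffer, sampled in batches of `random_batch_size` to amortize RNG calls."""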
+ while True:
+ yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
+
+ def __iter__(self):
+ buffer_size = self.buffer_size
+ rng = deepcopy(self.generator)
+ indices_iterator = self._iter_random_indices(rng, buffer_size)
+ # this is the shuffle buffer that we keep in memory
+ mem_buffer = []
+ for x in self.ex_iterable:
+ if len(mem_buffer) == buffer_size: # if the buffer is full, pick an example from it
+ i = next(indices_iterator)
+ yield mem_buffer[i]
+ mem_buffer[i] = x # replace the picked example by a new one
+ else: # otherwise, keep filling the buffer
+ mem_buffer.append(x)
+ # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
+ rng.shuffle(mem_buffer)
+ yield from mem_buffer
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
+ """Shuffle the wrapped examples iterable as well as the shuffling buffer."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
+ """Keep only the requested shard."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ buffer_size=self.buffer_size,
+ generator=self.generator,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class SkipExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n, None)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class TakeExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
+ return self
+
+ @staticmethod
+ def split_number(num, n):
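+ # distribute `num` items across `n` workers as evenly as possible, e.g. split_number(10, 3) -> [4, 3, 3]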
+ quotient = num // n
+ remainder = num % n
+ result = [quotient] * n
+ for i in range(remainder):
+ result[i] += 1
+ return result
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable":
+ """Keep only the requested shard."""
+ return TakeExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ n=self.split_number(self.n, num_workers)[worker_id],
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+def _apply_feature_types_on_example(
+ example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ example = dict(example)
+ # add missing columns
+ for column_name in features:
+ if column_name not in example:
+ example[column_name] = None
+ # we encode the example for ClassLabel feature types for example
+ encoded_example = features.encode_example(example)
+ # Decode example for Audio feature, e.g.
+ decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
+ return decoded_example
+
+
+def _apply_feature_types_on_batch(
+ batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ batch = dict(batch)
+ # add missing columns
+ n_examples = len(batch[next(iter(batch))])
+ for column_name in features:
+ if column_name not in batch:
+ batch[column_name] = [None] * n_examples
+ # we encode the batch for ClassLabel feature types for example
+ encoded_batch = features.encode_batch(batch)
+ # Decode batch for Audio feature, e.g.
+ decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id)
+ return decoded_batch
+
+
+class TypedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ features: Features,
+ token_per_repo_id: Dict[str, Union[str, bool, None]],
+ ):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.features = features
+ self.token_per_repo_id = token_per_repo_id
+ if self.ex_iterable.iter_arrow is not None:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ # Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ for key, example in self.ex_iterable:
+ yield (
+ key,
+ _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id),
+ )
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ schema = self.features.arrow_schema
+ for key, pa_table in self.ex_iterable.iter_arrow():
+ columns = set(pa_table.column_names)
+ # add missing columns
+ for column_name in self.features:
+ if column_name not in columns:
+ col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
+ pa_table = pa_table.append_column(column_name, col)
+ if pa_table.schema != schema:
+ pa_table = cast_table_to_features(pa_table, self.features)
+ yield key, pa_table
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable":
+ """Keep only the requested shard."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+@dataclass
+class FormattingConfig:
+ format_type: Optional[str]
+
+ def __post_init__(self):
+ if self.format_type == "pandas":
+ raise NotImplementedError(
+ "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
+ )
+
+
+@dataclass
+class ShufflingConfig:
+ generator: np.random.Generator
+ _original_seed: Optional[int] = None
+
+
+@dataclass
+class DistributedConfig:
+ rank: int
+ world_size: int
+
+
+def _maybe_add_torch_iterable_dataset_parent_class(cls):
+ """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available"""
+ if config.TORCH_AVAILABLE:
+ import torch.utils.data
+
+ if torch.utils.data.IterableDataset not in cls.__bases__:
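+ # add it dynamically so that instances pass `isinstance(ds, torch.utils.data.IterableDataset)` checks (e.g. in a DataLoader)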
+ cls.__bases__ += (torch.utils.data.IterableDataset,)
+
+
+class IterableDataset(DatasetInfoMixin):
+ """A Dataset backed by an iterable."""
+
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ formatting: Optional[FormattingConfig] = None,
+ shuffling: Optional[ShufflingConfig] = None,
+ distributed: Optional[DistributedConfig] = None,
+ token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
+ format_type="deprecated",
+ ):
+ if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
+ raise RuntimeError(
+ "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
+ "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
+ )
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+
+ self._ex_iterable = ex_iterable
+ self._formatting = formatting
+ self._shuffling = shuffling
+ self._distributed = distributed
+ self._epoch = 0
+ self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def __repr__(self):
+ return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})"
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def _head(self, n=5):
+ return _examples_to_batch(list(self.take(n)))
+
+ def _effective_generator(self):
+ if self._shuffling and self._epoch == 0:
+ return self._shuffling.generator
+ elif self._shuffling:
+ # Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
+ effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
+ effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
+ return np.random.default_rng(effective_seed)
+ else:
+ raise ValueError("This dataset is not shuffled")
+
+ @property
+ def n_shards(self) -> int:
+ if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
+ return self._ex_iterable.n_shards // self._distributed.world_size
+ return self._ex_iterable.n_shards
+
+ def _iter_pytorch(self):
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ # fix for fsspec when using multiprocess
+ _reset_fsspec_lock()
+ # check if there aren't too many workers
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
+ logger.warning(
+ f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
+ f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
+ )
+ logger.info(
+ f"To parallelize data loading, we give each process some shards (or data sources) to process. "
+ f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
+ f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
+ )
+ # split workload
+ _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
+ shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
+ if shards_indices:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}: Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+ else:
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}: Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ else:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}: Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
+ )
+
+ def _is_main_process(self):
+ if self._distributed and self._distributed.rank > 0:
+ return False
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if worker_info is not None and worker_info.id > 0:
+ return False
+ return True
+
+ def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
+ if self._shuffling:
+ ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
+ else:
+ ex_iterable = self._ex_iterable
+
+ if self._distributed:
+ rank = self._distributed.rank
+ world_size = self._distributed.world_size
+ if ex_iterable.n_shards % world_size == 0:
+ if self._is_main_process():
+ n_shards_per_node = ex_iterable.n_shards // world_size
+ plural = "s" if n_shards_per_node > 1 else ""
+ logger.info(
+ f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
+ else:
+ if self._is_main_process():
+ logger.info(
+ f"Assigning 1 example out of every {world_size} examples of the dataset to each node. The others are skipped during the iteration."
+ )
+ logger.info(
+ f"It is more efficient to distribute the dataset shards (or data sources) across nodes. "
+ f"You can do that by using a dataset with a number of shards that is a multiple of world_size={world_size}. "
+ f"The current dataset has {ex_iterable.n_shards} shards, which is not a multiple of {world_size}."
+ )
+ ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
+
+ return ex_iterable
+
+ def __iter__(self):
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
+ # We're a torch.utils.data.IterableDataset in a PyTorch worker process
+ yield from self._iter_pytorch()
+ return
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ Args:
+ batch_size (`int`):
+ Size of each batch to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be dropped.
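+
+ Example (a minimal usage sketch; the streaming `rotten_tomatoes` split is only an illustrative choice):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> batch = next(ds.iter(batch_size=2))
+ >>> len(batch["text"])
+ 2
+ ```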
+ """
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch
+ )
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
+ for key, pa_table in iterator:
+ yield formatter.format_batch(pa_table)
+ return
+
+ iterator = iter(ex_iterable)
+ for key, example in iterator:
+ # If batched, first build the batch
+ examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
+ if drop_last_batch and len(examples) < batch_size: # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_batch`.
+ batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
+ yield format_dict(batch) if format_dict else batch
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ gen_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Create an Iterable Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features (`Features`, *optional*):
+ Dataset features.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
+ This can be used to improve shuffling and when iterating over the dataset with multiple workers.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = IterableDataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
+ >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ gen_kwargs=gen_kwargs,
+ streaming=True,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ **kwargs,
+ ) -> "IterableDataset":
+ """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+
+ Returns:
+ [`IterableDataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ >>> columns=["id", "name"],
+ >>> )
+ >>> ds = IterableDataset.from_spark(df)
+ ```
+ """
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=True,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_file(filename: str) -> "IterableDataset":
+ """Instantiate an IterableDataset from an Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+
+ Returns:
+ [`IterableDataset`]
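+
+ Example (a sketch; `"path/to/dataset.arrow"` is a placeholder for an existing Arrow cache file):
+
+ ```py
+ >>> ds = IterableDataset.from_file("path/to/dataset.arrow")
+ ```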
+ """
+ pa_table_schema = read_schema_from_file(filename)
+ inferred_features = Features.from_arrow_schema(pa_table_schema)
+ ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
+ return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDataset":
+ """
+ Return a dataset with the specified format.
+ Supported formats: "arrow", or None for regular python objects.
+ The other formats are currently not implemented.
+
+ Args:
+ type (`str`, *optional*, defaults to `None`):
+ If set to "torch", the returned dataset
+ will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
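+
+ Example (a minimal sketch; assumes `torch` is installed, mirroring the `from_generator` example above):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from torch.utils.data import DataLoader
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.with_format("torch")
+ >>> dataloader = DataLoader(ds, batch_size=4)
+ ```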
+ """
+ type = get_format_type_from_alias(type)
+ # TODO(QL): add format_kwargs
+ # TODO(QL): add format_columns and return_all_columns
+ # TODO(QL): add pandas format
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=FormattingConfig(format_type=type),
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ features: Optional[Features] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have fewer than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`List[str]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ features (`Features`, *optional*, defaults to `None`):
+ Feature types of the resulting dataset.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+ if function is None:
+ function = identity_func
+ if fn_kwargs is None:
+ fn_kwargs = {}
+ ex_iterable = MappedExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, default `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds.take(3))
+ [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
+ info = copy.deepcopy(self._info)
+ info.features = None
+
+ # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+ ex_iterable = FilteredExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDataset":
+ """
+ Randomly shuffles the elements of this dataset.
+
+ This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> list(shuffled_ds.take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ if generator is None:
+ generator = np.random.default_rng(seed)
+ else:
+ generator = deepcopy(generator)
+ shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
+ return IterableDataset(
+ ex_iterable=BufferShuffledExamplesIterable(
+ self._ex_iterable, buffer_size=buffer_size, generator=generator
+ ).shuffle_data_sources(generator),
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=shuffling,
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def set_epoch(self, epoch: int):
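+ # used by `_effective_generator` to derive a new seed, so a shuffled dataset is reshuffled differently at each epoch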
+ self._epoch = epoch
+
+ def skip(self, n) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def take(self, n) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ @property
+ def column_names(self) -> Optional[List[str]]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return list(self._info.features.keys()) if self._info.features is not None else None
+
+ def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
+ """Add column to Dataset.
+
+ Args:
+ name (str): Column name.
+ column (list or np.array): Column data to be added.
+
+ Returns:
+ `IterableDataset`
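+
+ Example (an illustrative sketch; the column values and split size below are assumed for demonstration):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> n_examples = 8530  # assumed size of the train split
+ >>> ds = ds.add_column("source", ["rotten_tomatoes"] * n_examples)
+ >>> next(iter(ds))["source"]
+ 'rotten_tomatoes'
+ ```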
+ """
+ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
+ """
+ Rename a column in the dataset, and move the features associated with the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return self.rename_columns({original_column_name: new_column_name})
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
+ """
+ Rename several columns in the dataset, and move the features associated with the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with renamed columns
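+
+ Example (an illustrative sketch mirroring `rename_column` above; the new names are arbitrary):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> sorted(next(iter(ds)))
+ ['movie_review', 'rating']
+ ```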
+ """
+
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(
+ partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
+ )
+ if original_features is not None:
+ ds_iterable._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping.keys() else col: feature
+ for col, feature in original_features.items()
+ }
+ )
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+ return ds_iterable
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated with them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(remove_columns=column_names)
+ if original_features is not None:
+ ds_iterable._info.features = original_features.copy()
+ for col, _ in original_features.items():
+ if col in column_names:
+ del ds_iterable._info.features[col]
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+
+ return ds_iterable
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to select.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object with selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ if self._info:
+ info = copy.deepcopy(self._info)
+ if self._info.features is not None:
+ missing_columns = set(column_names) - set(self._info.features.keys())
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Columns in the dataset: "
+ f"{list(self._info.features.keys())}."
+ )
+ info.features = Features({c: info.features[c] for c in column_names})
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+
+ ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=self._shuffling,
+ distributed=self._distributed,
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`Feature`):
+ Target feature.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
+ >>> ds.features
+ {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds.features
+ {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features[column] = feature
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features["label"] = ClassLabel(names=["bad", "good"])
+ >>> new_features["text"] = Value("large_string")
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features = features
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _step(self, step: int, offset: int) -> "IterableDataset":
+ ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _resolve_features(self):
+ if self.features is not None:
+ return self
+ elif isinstance(self._ex_iterable, TypedExamplesIterable):
+ features = self._ex_iterable.features
+ else:
+ features = _infer_features_from_batch(self.with_format(None)._head())
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+
+def _concatenate_iterable_datasets(
+ dsets: List[IterableDataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> IterableDataset:
+ """
+ Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`.
+ Missing data are filled with None values.
+
+
+
+ Args:
+ dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
+ ```
+ """
+ dsets = [d._resolve_features() for d in dsets]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ _check_column_names([col_name for dset in dsets for col_name in dset.features])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in dsets]
+ if axis == 0:
+ ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ else:
+ ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ # Set new info - we update the features
+ # setting the features also ensures that missing columns are filled with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in dsets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _interleave_iterable_datasets(
+ datasets: List[IterableDataset],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> IterableDataset:
+ """
+ Interleave several iterable datasets (sources) into a single iterable dataset.
+ The new iterable dataset alternates between the sources to yield examples.
+ If `probabilities = None` (default), the iterable dataset cycles through the sources in order for each next example in the iteration.
+ If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each next example in the iteration.
+
+
+
+ Args:
+ datasets (`List[IterableDataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+
+ Output:
+ `datasets.IterableDataset`
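+
+ Example (an illustrative sketch; in practice the public `interleave_datasets` dispatches here for iterable datasets, and `Dataset.to_iterable_dataset` is used to build small sources):
+
+ ```py
+ >>> from datasets import Dataset
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]}).to_iterable_dataset()
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12]}).to_iterable_dataset()
+ >>> ds = _interleave_iterable_datasets([d1, d2])
+ >>> [x["a"] for x in ds]  # with no probabilities, sources alternate in order
+ [0, 10, 1, 11, 2, 12]
+ ```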
+ """
+ datasets = [d._resolve_features() for d in datasets]
+
+ # Perform checks
+ _check_if_features_can_be_aligned([dset.features for dset in datasets])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in datasets]
+
+ # Use cycling or random cycling of sources
+ if probabilities is None:
+ ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
+ else:
+ generator = np.random.default_rng(seed)
+ ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
+ )
+ # Set new info - we update the features
+ # setting the features also ensures that missing columns are filled with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in datasets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {
+ repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
+ }
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
+ """
+ Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most efficient setup.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`IterableDataset`]):
+ The iterable dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
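+
+ Example (an illustrative sketch; in practice this is reached through `datasets.distributed.split_dataset_by_node`, and the exact examples kept depend on how the dataset was sharded):
+
+ ```py
+ >>> from datasets import Dataset
+ >>> ds = Dataset.from_dict({"a": list(range(6))}).to_iterable_dataset(num_shards=2)
+ >>> ds_rank0 = _split_by_node_iterable_dataset(ds, rank=0, world_size=2)
+ >>> [x["a"] for x in ds_rank0]  # rank 0 keeps one of the two shards, e.g. the first half
+ [0, 1, 2]
+ ```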
+ """
+ if dataset._distributed:
+ world_size = world_size * dataset._distributed.world_size
+ rank = world_size * dataset._distributed.rank + rank
+ distributed = DistributedConfig(rank=rank, world_size=world_size)
+ return IterableDataset(
+ ex_iterable=dataset._ex_iterable,
+ info=dataset._info.copy(),
+ split=dataset._split,
+ formatting=dataset._formatting,
+ shuffling=copy.deepcopy(dataset._shuffling),
+ distributed=distributed,
+ token_per_repo_id=dataset._token_per_repo_id,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/keyhash.py b/env-llmeval/lib/python3.10/site-packages/datasets/keyhash.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c75fcfd7ffb300aac1ffd0fc822287f21b56f8a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/keyhash.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+"""
+Hashing function for dataset keys using `hashlib.md5`
+
+Requirements for the hash function:
+
+- Provides a uniformly distributed hash from random space
+- Adequately fast
+- Works with multiple input types (in this case, `str`, `int` or `bytes`)
+- Platform independent (generates the same hash on different operating systems and machines)
+
+The hashing function provides a unique 128-bit integer hash of the key provided.
+
+The split name is used here as the hash salt to avoid getting the same hashes
+in different splits for identical keys.
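+
+Example (an illustrative sketch):
+
+ hasher = KeyHasher(hash_salt="train")
+ key_hash = hasher.hash("some-key")  # deterministic 128-bit integer, stable across platforms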
+"""
+
+from typing import Union
+
+from huggingface_hub.utils import insecure_hashlib
+
+
+def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+ """
+ Returns the input hash_data in its bytes form
+
+ Args:
+ hash_data: the hash salt/key to be converted to bytes
+ """
+ if isinstance(hash_data, bytes):
+ # Data is already in bytes, return it as is
+ return hash_data
+ elif isinstance(hash_data, str):
+ # We keep the data as is so it can later be encoded to UTF-8
+ # However replace `\\` with `/` for Windows compatibility
+ hash_data = hash_data.replace("\\", "/")
+ elif isinstance(hash_data, int):
+ hash_data = str(hash_data)
+ else:
+ # If data is not of the required type, raise error
+ raise InvalidKeyError(hash_data)
+
+ return hash_data.encode("utf-8")
+
+
+class InvalidKeyError(Exception):
+ """Raises an error when given key is of invalid datatype."""
+
+ def __init__(self, hash_data):
+ self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+ self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+ self.suffix = "\nKeys should be either str, int or bytes type"
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class DuplicatedKeysError(Exception):
+ """Raise an error when duplicate key found."""
+
+ def __init__(self, key, duplicate_key_indices, fix_msg=""):
+ self.key = key
+ self.duplicate_key_indices = duplicate_key_indices
+ self.fix_msg = fix_msg
+ self.prefix = "Found multiple examples generated with the same key"
+ if len(duplicate_key_indices) <= 20:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+ else:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
+ self.suffix = "\n" + fix_msg if fix_msg else ""
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class KeyHasher:
+ """KeyHasher class for providing hash using md5"""
+
+ def __init__(self, hash_salt: str):
+ self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
+
+ def hash(self, key: Union[str, int, bytes]) -> int:
+ """Returns 128-bits unique hash of input key
+
+ Args:
+ key: the input key to be hashed (should be str, int or bytes)
+
+ Returns: 128-bit int hash key"""
+ md5 = self._split_md5.copy()
+ byte_key = _as_bytes(key)
+ md5.update(byte_key)
+ # Convert to integer with hexadecimal conversion
+ return int(md5.hexdigest(), 16)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/load.py b/env-llmeval/lib/python3.10/site-packages/datasets/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..df59ac40d8d7644f97117f7b0c120fb65ffe8ee1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/load.py
@@ -0,0 +1,2673 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Access datasets."""
+
+import filecmp
+import glob
+import importlib
+import inspect
+import json
+import os
+import posixpath
+import shutil
+import signal
+import time
+import warnings
+from collections import Counter
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+
+import fsspec
+import requests
+import yaml
+from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFileSystem
+
+from . import config
+from .arrow_dataset import Dataset
+from .builder import BuilderConfig, DatasetBuilder
+from .data_files import (
+ DEFAULT_PATTERNS_ALL,
+ DataFilesDict,
+ DataFilesList,
+ DataFilesPatternsDict,
+ DataFilesPatternsList,
+ EmptyDatasetError,
+ get_data_patterns,
+ get_metadata_patterns,
+ sanitize_patterns,
+)
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
+from .exceptions import DataFilesNotFoundError, DatasetNotFoundError
+from .features import Features
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict
+from .iterable_dataset import IterableDataset
+from .metric import Metric
+from .naming import camelcase_to_snakecase, snakecase_to_camelcase
+from .packaged_modules import (
+ _EXTENSION_TO_MODULE,
+ _MODULE_SUPPORTS_METADATA,
+ _MODULE_TO_EXTENSIONS,
+ _PACKAGED_DATASETS_MODULES,
+ _hash_python_lines,
+)
+from .splits import Split
+from .utils import _datasets_server
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import (
+ OfflineModeIsEnabled,
+ _raise_if_offline_mode_is_enabled,
+ cached_path,
+ head_hf_s3,
+ hf_github_url,
+ init_hf_modules,
+ is_relative_path,
+ relative_to_absolute_path,
+ url_or_path_join,
+)
+from .utils.hub import hf_hub_url
+from .utils.info_utils import VerificationMode, is_small_dataset
+from .utils.logging import get_logger
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import get_imports
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"]
+
+
+def _raise_timeout_error(signum, frame):
+ raise ValueError(
+ "Loading this dataset requires you to execute custom code contained in the dataset repository on your local "
+ "machine. Please set the option `trust_remote_code=True` to permit loading of this dataset."
+ )
+
+
+def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool:
+ """
+ Copied and adapted from Transformers
+ https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589
+ """
+ trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE
+ if trust_remote_code is None:
+ if config.TIME_OUT_REMOTE_CODE > 0:
+ try:
+ signal.signal(signal.SIGALRM, _raise_timeout_error)
+ signal.alarm(config.TIME_OUT_REMOTE_CODE)
+ while trust_remote_code is None:
+ answer = input(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
+ f"Do you wish to run the custom code? [y/N] "
+ )
+ if answer.lower() in ["yes", "y", "1"]:
+ trust_remote_code = True
+ elif answer.lower() in ["no", "n", "0", ""]:
+ trust_remote_code = False
+ signal.alarm(0)
+ except Exception:
+ # OS which does not support signal.SIGALRM
+ raise ValueError(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
+ )
+ else:
+ # For the CI which might put the timeout at 0
+ _raise_timeout_error(None, None)
+ return trust_remote_code
+
+
+def init_dynamic_modules(
+ name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
+):
+ """
+ Create a module with name `name` in which you can add dynamic modules
+ such as metrics or datasets. The module can be imported using its name.
+ The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
+ be overridden by specifying a path to another directory in `hf_modules_cache`.
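+
+ Example (an illustrative sketch; the default module name comes from `config.MODULE_NAME_FOR_DYNAMIC_MODULES`):
+
+ ```py
+ >>> dynamic_modules_path = init_dynamic_modules()  # e.g. ~/.cache/huggingface/modules/datasets_modules
+ ```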
+ """
+ hf_modules_cache = init_hf_modules(hf_modules_cache)
+ dynamic_modules_path = os.path.join(hf_modules_cache, name)
+ os.makedirs(dynamic_modules_path, exist_ok=True)
+ if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
+ with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
+ pass
+ return dynamic_modules_path
+
+
+def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
+ """Import a module at module_path and return its main class:
+ - a DatasetBuilder if dataset is True
+ - a Metric if dataset is False
+ """
+ module = importlib.import_module(module_path)
+
+ if dataset:
+ main_cls_type = DatasetBuilder
+ else:
+ main_cls_type = Metric
+
+ # Find the main class in our imported module
+ module_main_cls = None
+ for name, obj in module.__dict__.items():
+ if inspect.isclass(obj) and issubclass(obj, main_cls_type):
+ if inspect.isabstract(obj):
+ continue
+ module_main_cls = obj
+ obj_module = inspect.getmodule(obj)
+ if obj_module is not None and module == obj_module:
+ break
+
+ return module_main_cls
+
+
+class _InitializeConfiguredDatasetBuilder:
+ """
+ From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
+ See also ConfiguredDatasetBuilder.__reduce__
+ When called with the param value as the only argument, returns an
+ un-initialized instance of the parameterized class. Subsequent __setstate__
+ will be called by pickle.
+ """
+
+ def __call__(self, builder_cls, metadata_configs, default_config_name, name):
+ # make a simple object which has no complex __init__ (this one will do)
+ obj = _InitializeConfiguredDatasetBuilder()
+ obj.__class__ = configure_builder_class(
+ builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
+ )
+ return obj
+
+
+def configure_builder_class(
+ builder_cls: Type[DatasetBuilder],
+ builder_configs: List[BuilderConfig],
+ default_config_name: Optional[str],
+ dataset_name: str,
+) -> Type[DatasetBuilder]:
+ """
+ Dynamically create a builder class with custom builder configs parsed from README.md file,
+ i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list.
+ """
+
+ class ConfiguredDatasetBuilder(builder_cls):
+ BUILDER_CONFIGS = builder_configs
+ DEFAULT_CONFIG_NAME = default_config_name
+
+ __module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
+
+ def __reduce__(self): # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
+ parent_builder_cls = self.__class__.__mro__[1]
+ return (
+ _InitializeConfiguredDatasetBuilder(),
+ (
+ parent_builder_cls,
+ self.BUILDER_CONFIGS,
+ self.DEFAULT_CONFIG_NAME,
+ self.dataset_name,
+ ),
+ self.__dict__.copy(),
+ )
+
+ ConfiguredDatasetBuilder.__name__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+ ConfiguredDatasetBuilder.__qualname__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+
+ return ConfiguredDatasetBuilder
+
+
+def get_dataset_builder_class(
+ dataset_module: "DatasetModule", dataset_name: Optional[str] = None
+) -> Type[DatasetBuilder]:
+ builder_cls = import_main_class(dataset_module.module_path)
+ if dataset_module.builder_configs_parameters.builder_configs:
+ dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name")
+ if dataset_name is None:
+ raise ValueError("dataset_name should be specified but got None")
+ builder_cls = configure_builder_class(
+ builder_cls,
+ builder_configs=dataset_module.builder_configs_parameters.builder_configs,
+ default_config_name=dataset_module.builder_configs_parameters.default_config_name,
+ dataset_name=dataset_name,
+ )
+ return builder_cls
+
+
+def files_to_hash(file_paths: List[str]) -> str:
+ """
+ Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
+ """
+ # List all python files in directories if directories are supplied as part of external imports
+ to_use_files: List[Union[Path, str]] = []
+ for file_path in file_paths:
+ if os.path.isdir(file_path):
+ to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
+ else:
+ to_use_files.append(file_path)
+
+ # Get the code from all these files
+ lines = []
+ for file_path in to_use_files:
+ with open(file_path, encoding="utf-8") as f:
+ lines.extend(f.readlines())
+ return _hash_python_lines(lines)
+
+
+def increase_load_count(name: str, resource_type: str):
+ """Update the download count of a dataset or metric."""
+ if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
+ try:
+ head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
+ except Exception:
+ pass
+
+
+def _download_additional_modules(
+ name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]
+) -> List[Tuple[str, str]]:
+ """
+ Download the additional modules needed by a module `<name>.py` located at `<base_path>/<name>.py` (URL or local path).
+ The imports must have been parsed first using ``get_imports``.
+
+ If some modules need to be installed with pip, an error is raised showing how to install them.
+ This function returns the list of downloaded modules as tuples (import_name, module_file_path).
+
+ The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
+ """
+ local_imports = []
+ library_imports = []
+ download_config = download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading extra modules"
+ for import_type, import_name, import_path, sub_directory in imports:
+ if import_type == "library":
+ library_imports.append((import_name, import_path)) # Import from a library
+ continue
+
+ if import_name == name:
+ raise ValueError(
+ f"Error in the {name} script, importing relative {import_name} module "
+ f"but {import_name} is the name of the script. "
+ f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
+ f"comment pointing to the original relative import file path."
+ )
+ if import_type == "internal":
+ url_or_filename = url_or_path_join(base_path, import_path + ".py")
+ elif import_type == "external":
+ url_or_filename = import_path
+ else:
+ raise ValueError("Wrong import_type")
+
+ local_import_path = cached_path(
+ url_or_filename,
+ download_config=download_config,
+ )
+ if sub_directory is not None:
+ local_import_path = os.path.join(local_import_path, sub_directory)
+ local_imports.append((import_name, local_import_path))
+
+ # Check library imports
+ needs_to_be_installed = {}
+ for library_import_name, library_import_path in library_imports:
+ try:
+ lib = importlib.import_module(library_import_name) # noqa F841
+ except ImportError:
+ if library_import_name not in needs_to_be_installed or library_import_path != library_import_name:
+ needs_to_be_installed[library_import_name] = library_import_path
+ if needs_to_be_installed:
+ _dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency"
+ _them_str = "them" if len(needs_to_be_installed) > 1 else "it"
+ if "sklearn" in needs_to_be_installed.keys():
+ needs_to_be_installed["sklearn"] = "scikit-learn"
+ if "Bio" in needs_to_be_installed.keys():
+ needs_to_be_installed["Bio"] = "biopython"
+ raise ImportError(
+ f"To be able to use {name}, you need to install the following {_dependencies_str}: "
+ f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install "
+ f"{' '.join(needs_to_be_installed.values())}' for instance."
+ )
+ return local_imports
+
+
+def _copy_script_and_other_resources_in_importable_dir(
+ name: str,
+ importable_directory_path: str,
+ subdirectory_name: str,
+ original_local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ download_mode: Optional[Union[DownloadMode, str]],
+) -> str:
+ """Copy a script and its required imports to an importable directory
+
+ Args:
+ name (str): name of the resource to load
+ importable_directory_path (str): path to the loadable folder in the dynamic modules directory
+ subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
+ original_local_path (str): local path to the resource script
+ local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
+ additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
+ download_mode (Optional[Union[DownloadMode, str]]): download mode
+
+ Return:
+ importable_local_file: path to an importable module with importlib.import_module
+ """
+
+ # Define a directory with a unique name in our dataset or metric folder
+ # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
+ importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
+ importable_local_file = os.path.join(importable_subdirectory, name + ".py")
+ # Prevent parallel disk operations
+ lock_path = importable_directory_path + ".lock"
+ with FileLock(lock_path):
+ # Create main dataset/metrics folder if needed
+ if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
+ shutil.rmtree(importable_directory_path)
+ os.makedirs(importable_directory_path, exist_ok=True)
+
+ # add an __init__ file to the main dataset folder if needed
+ init_file_path = os.path.join(importable_directory_path, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Create hash dataset folder if needed
+ os.makedirs(importable_subdirectory, exist_ok=True)
+ # add an __init__ file to the hash dataset folder if needed
+ init_file_path = os.path.join(importable_subdirectory, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Copy dataset.py file in hash folder if needed
+ if not os.path.exists(importable_local_file):
+ shutil.copyfile(original_local_path, importable_local_file)
+ # Record metadata associating original dataset path with local unique folder
+ # Use os.path.splitext to split extension from importable_local_file
+ meta_path = os.path.splitext(importable_local_file)[0] + ".json"
+ if not os.path.exists(meta_path):
+ meta = {"original file path": original_local_path, "local file path": importable_local_file}
+ # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ # Copy all the additional imports
+ for import_name, import_path in local_imports:
+ if os.path.isfile(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
+ if not os.path.exists(full_path_local_import):
+ shutil.copyfile(import_path, full_path_local_import)
+ elif os.path.isdir(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name)
+ if not os.path.exists(full_path_local_import):
+ shutil.copytree(import_path, full_path_local_import)
+ else:
+ raise ImportError(f"Error with local import at {import_path}")
+
+ # Copy additional files like dataset_infos.json file if needed
+ for file_name, original_path in additional_files:
+ destination_additional_path = os.path.join(importable_subdirectory, file_name)
+ if not os.path.exists(destination_additional_path) or not filecmp.cmp(
+ original_path, destination_additional_path
+ ):
+ shutil.copyfile(original_path, destination_additional_path)
+ return importable_local_file
+
+
+def _get_importable_file_path(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> str:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ return os.path.join(importable_directory_path, subdirectory_name, name + ".py")
+
+
+def _create_importable_file(
+ local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+ download_mode: DownloadMode,
+) -> None:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
+ (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
+ importable_local_file = _copy_script_and_other_resources_in_importable_dir(
+ name=name.split("/")[-1],
+ importable_directory_path=importable_directory_path,
+ subdirectory_name=subdirectory_name,
+ original_local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ download_mode=download_mode,
+ )
+ logger.debug(f"Created importable dataset file at {importable_local_file}")
+
+
+def _load_importable_file(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> Tuple[str, str]:
+ module_path = ".".join(
+ [
+ os.path.basename(dynamic_modules_path),
+ module_namespace,
+ name.replace("/", "--"),
+ subdirectory_name,
+ name.split("/")[-1],
+ ]
+ )
+ return module_path, subdirectory_name
+
+
+def infer_module_for_data_files_list(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of data files.
+
+ It picks the module based on the most common file extension.
+ In case of a draw ".parquet" is the favorite, and then alphabetical order.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass `use_auth_token` or `storage_options` to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
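+
+ Example (an illustrative sketch; a plain list of file names stands in for a `DataFilesList`, since only iteration over the paths is needed here):
+
+ ```py
+ >>> infer_module_for_data_files_list(["train.csv", "test.csv"])
+ ('csv', {})
+ ```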
+ """
+ extensions_counter = Counter(
+ ("." + suffix.lower(), xbasename(filepath) in ("metadata.jsonl", "metadata.csv"))
+ for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]
+ for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+
+ def sort_key(ext_count: Tuple[Tuple[str, bool], int]) -> Tuple[int, bool]:
+ """Sort by count and set ".parquet" as the favorite in case of a draw, and ignore metadata files"""
+ (ext, is_metadata), count = ext_count
+ return (not is_metadata, count, ext == ".parquet", ext)
+
+ for (ext, _), _ in sorted(extensions_counter.items(), key=sort_key, reverse=True):
+ if ext in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[ext]
+ elif ext == ".zip":
+ return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
+ return None, {}
+
+
+def infer_module_for_data_files_list_in_archives(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of archive data files.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+ download_config (`DownloadConfig`, *optional*): Mainly used to pass `use_auth_token` or `storage_options` to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
+ """
+ archived_files = []
+ archive_files_counter = 0
+ for filepath in data_files_list:
+ if str(filepath).endswith(".zip"):
+ archive_files_counter += 1
+ if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE:
+ break
+ extracted = xjoin(StreamingDownloadManager().extract(filepath), "**")
+ archived_files += [
+ f.split("::")[0]
+ for f in xglob(extracted, recursive=True, download_config=download_config)[
+ : config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE
+ ]
+ ]
+ extensions_counter = Counter(
+ "." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+ most_common = extensions_counter.most_common(1)[0][0]
+ if most_common in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[most_common]
+ return None, {}
+
+
+def infer_module_for_data_files(
+ data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], Dict[str, Any]]:
+ """Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
+
+ Args:
+ data_files ([`DataFilesDict`]): Dict of list of data files.
+ path (str, *optional*): Dataset name or path.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - builder kwargs
+ """
+ split_modules = {
+ split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
+ for split, data_files_list in data_files.items()
+ }
+ module_name, default_builder_kwargs = next(iter(split_modules.values()))
+ if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
+ raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
+ if not module_name:
+ raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else ""))
+ return module_name, default_builder_kwargs
+
+
+def create_builder_configs_from_metadata_configs(
+ module_path: str,
+ metadata_configs: MetadataConfigs,
+ supports_metadata: bool,
+ base_path: Optional[str] = None,
+ default_builder_kwargs: Dict[str, Any] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[List[BuilderConfig], str]:
+ builder_cls = import_main_class(module_path)
+ builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
+ default_config_name = metadata_configs.get_default_config_name()
+ builder_configs = []
+ default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs
+
+ base_path = base_path if base_path is not None else ""
+ for config_name, config_params in metadata_configs.items():
+ config_data_files = config_params.get("data_files")
+ config_data_dir = config_params.get("data_dir")
+ config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path
+ try:
+ config_patterns = (
+ sanitize_patterns(config_data_files)
+ if config_data_files is not None
+ else get_data_patterns(config_base_path)
+ )
+ config_data_files_dict = DataFilesPatternsDict.from_patterns(
+ config_patterns,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ except EmptyDatasetError as e:
+ raise EmptyDatasetError(
+ f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
+ f" check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. "
+ ) from e
+ if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ config_metadata_patterns = get_metadata_patterns(base_path, download_config=download_config)
+ except FileNotFoundError:
+ config_metadata_patterns = None
+ if config_metadata_patterns is not None:
+ config_metadata_data_files_list = DataFilesPatternsList.from_patterns(config_metadata_patterns)
+ config_data_files_dict = DataFilesPatternsDict(
+ {
+ split: data_files_list + config_metadata_data_files_list
+ for split, data_files_list in config_data_files_dict.items()
+ }
+ )
+ ignored_params = [
+ param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
+ ]
+ if ignored_params:
+ logger.warning(
+ f"Some datasets params were ignored: {ignored_params}. "
+ "Make sure to use only valid params for the dataset builder and to have "
+ "a up-to-date version of the `datasets` library."
+ )
+ builder_configs.append(
+ builder_config_cls(
+ name=config_name,
+ data_files=config_data_files_dict,
+ data_dir=config_data_dir,
+ **{
+ param: value
+ for param, value in {**default_builder_kwargs, **config_params}.items()
+ if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
+ },
+ )
+ )
+ return builder_configs, default_config_name
+
+
+@dataclass
+class BuilderConfigsParameters:
+ """Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
+
+ Attributes:
+ metadata_configs (`MetadataConfigs`, *optional*):
+ Configs parsed from yaml's metadata.
+ builder_configs (`list[BuilderConfig]`, *optional*):
+ List of BuilderConfig objects created from metadata_configs above.
+ default_config_name (`str`):
+ Name of default config taken from yaml's metadata.
+ """
+
+ metadata_configs: Optional[MetadataConfigs] = None
+ builder_configs: Optional[List[BuilderConfig]] = None
+ default_config_name: Optional[str] = None
+
+
+@dataclass
+class DatasetModule:
+ module_path: str
+ hash: str
+ builder_kwargs: dict
+ builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
+ dataset_infos: Optional[DatasetInfosDict] = None
+
+
+@dataclass
+class MetricModule:
+ module_path: str
+ hash: str
+
+
+class _DatasetModuleFactory:
+ def get_module(self) -> DatasetModule:
+ raise NotImplementedError
+
+
+class _MetricModuleFactory:
+ def get_module(self) -> MetricModule:
+ raise NotImplementedError
+
+
+class GithubMetricModuleFactory(_MetricModuleFactory):
+ """Get the module of a metric. The metric script is downloaded from GitHub.
+
+
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config.copy() if download_config else DownloadConfig()
+ if self.download_config.max_retries < 3:
+ self.download_config.max_retries = 3
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ assert self.name.count("/") == 0
+ increase_load_count(name, resource_type="metric")
+
+ def download_loading_script(self, revision: Optional[str]) -> str:
+ file_path = hf_github_url(path=self.name, name=self.name + ".py", revision=revision, dataset=False)
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ _loading_script_url = hf_github_url(
+ path=self.name, name=self.name + ".py", revision=self.revision, dataset=False
+ )
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {_loading_script_url}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ revision = self.revision
+ try:
+ local_path = self.download_loading_script(revision)
+ revision = self.revision
+ except FileNotFoundError:
+ if revision is not None:
+ raise
+ else:
+ revision = "main"
+ local_path = self.download_loading_script(revision)
+ logger.warning(
+ f"Couldn't find a directory or a metric named '{self.name}' in this version. "
+ f"It was picked from the main branch on github instead."
+ )
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_github_url(path=self.name, name="", revision=revision, dataset=False),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make the new module to be noticed by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalMetricModuleFactory(_MetricModuleFactory):
+ """Get the module of a local metric. The metric script is loaded from a local script.
+
+
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make sure the new module is picked up by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """Get the module of a local dataset. The dataset script is loaded from a local script."""
+
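+ # Illustrative usage (a minimal sketch; "./my_dataset/my_dataset.py" is a hypothetical local script):
+ # factory = LocalDatasetModuleFactoryWithScript("./my_dataset/my_dataset.py", trust_remote_code=True)
+ # dataset_module = factory.get_module()  # copies the script into the dynamic modules cache and imports it
+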
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
+ dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path.is_file():
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
+ if dataset_readme_path.is_file():
+ additional_files.append((config.REPOCARD_FILENAME, str(dataset_readme_path)))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+
+ # make sure the new module is picked up by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {"base_path": str(Path(self.path).parent)}
+ return DatasetModule(module_path, hash, builder_kwargs)
+
+
+class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+ """Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
+ from the data files extensions."""
+
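+ # Illustrative usage (a minimal sketch; "./my_data" is a hypothetical directory containing CSV files):
+ # factory = LocalDatasetModuleFactoryWithoutScript("./my_data")
+ # dataset_module = factory.get_module()  # infers the packaged "csv" builder from the file extensions
+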
+ def __init__(
+ self,
+ path: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ if data_dir and os.path.isabs(data_dir):
+ raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
+
+ self.path = Path(path).as_posix()
+ self.name = Path(path).stem
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_mode = download_mode
+
+ def get_module(self) -> DatasetModule:
+ readme_path = os.path.join(self.path, config.REPOCARD_FILENAME)
+ standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME)
+ dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
+ if os.path.exists(standalone_yaml_path):
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ # we need a set of data files to find which dataset builder to use
+ # because we need to infer the module name from the file extensions
+ base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.path,
+ )
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": self.path,
+ "dataset_name": camelcase_to_snakecase(Path(self.path).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
+ with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs})
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class PackagedDatasetModuleFactory(_DatasetModuleFactory):
+ """Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
+
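+ # Illustrative usage (a minimal sketch; "train.csv" is a hypothetical local file):
+ # factory = PackagedDatasetModuleFactory("csv", data_files={"train": "train.csv"})
+ # dataset_module = factory.get_module()  # returns the module path and hash of the packaged csv builder
+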
+ def __init__(
+ self,
+ name: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
+ patterns = sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns(base_path)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ download_config=self.download_config,
+ base_path=base_path,
+ )
+ supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
+
+ builder_kwargs = {
+ "data_files": data_files,
+ "dataset_name": self.name,
+ }
+
+ return DatasetModule(module_path, hash, builder_kwargs)
+
+
+class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from data files of a dataset repository.
+ The dataset builder module to use is inferred from the extensions of the data files.
+ """
+
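+ # Illustrative usage (a minimal sketch; "username/dataset_name" is a hypothetical Hub repository containing only data files):
+ # factory = HubDatasetModuleFactoryWithoutScript("username/dataset_name")
+ # dataset_module = factory.get_module()  # reads the dataset card and infers the packaged builder from the file extensions
+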
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision=self.revision,
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ # even if metadata_configs is not None (which means that we will resolve files for each config later)
+ # we cannot skip resolving all files because we need to infer the module name from the file extensions
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip("/")
+
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ dataset_readme_path = cached_path(
+ hf_hub_url(self.name, config.REPOCARD_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data
+ except FileNotFoundError:
+ dataset_card_data = DatasetCardData()
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading standalone yaml"
+ try:
+ standalone_yaml_path = cached_path(
+ hf_hub_url(self.name, config.REPOYAML_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ except FileNotFoundError:
+ pass
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ # we need a set of data files to find which dataset builder to use
+ # because we need to infer the module name from the file extensions
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path, download_config=self.download_config)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ download_config=self.download_config,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.name,
+ download_config=self.download_config,
+ )
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ download_config=self.download_config,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": hf_hub_url(self.name, "", revision=revision).rstrip("/"),
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ dataset_infos_path = cached_path(
+ hf_hub_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ except FileNotFoundError:
+ pass
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = revision
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from parquet files of a dataset repository parquet export.
+ """
+
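+ # Illustrative usage (a minimal sketch; assumes a Parquet export exists for the hypothetical "username/dataset_name"):
+ # factory = HubDatasetModuleFactoryWithParquetExport("username/dataset_name")
+ # dataset_module = factory.get_module()  # builds configs from the "refs/convert/parquet" export
+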
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ exported_parquet_files = _datasets_server.get_exported_parquet_files(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ exported_dataset_infos = _datasets_server.get_exported_dataset_infos(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
+ for config_name in exported_dataset_infos
+ }
+ )
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision="refs/convert/parquet",
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
+ revision=revision, exported_parquet_files=exported_parquet_files, dataset_infos=dataset_infos
+ )
+ module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ supports_metadata=False,
+ download_config=self.download_config,
+ )
+ hash = self.revision
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset from a dataset repository.
+ The dataset script comes from the script inside the dataset repository.
+ """
+
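+ # Illustrative usage (a minimal sketch; "username/dataset_name" is a hypothetical Hub repository with a loading script):
+ # factory = HubDatasetModuleFactoryWithScript("username/dataset_name", trust_remote_code=True)
+ # dataset_module = factory.get_module()  # downloads "dataset_name.py" and imports it from the dynamic modules cache
+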
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ increase_load_count(name, resource_type="dataset")
+
+ def download_loading_script(self) -> str:
+ file_path = hf_hub_url(self.name, self.name.split("/")[-1] + ".py", revision=self.revision)
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def download_dataset_infos_file(self) -> str:
+ dataset_infos = hf_hub_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision)
+ # Download the dataset infos file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ return cached_path(
+ dataset_infos,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def download_dataset_readme_file(self) -> str:
+ readme_url = hf_hub_url(self.name, config.REPOCARD_FILENAME, revision=self.revision)
+ # Download the dataset README file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ return cached_path(
+ readme_url,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ local_path = self.download_loading_script()
+ dataset_infos_path = self.download_dataset_infos_file()
+ dataset_readme_path = self.download_dataset_readme_file()
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_hub_url(self.name, "", revision=self.revision),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path:
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
+ if dataset_readme_path:
+ additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make sure the new module is picked up by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "base_path": hf_hub_url(self.name, "", revision=self.revision).rstrip("/"),
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs)
+
+
+class CachedDatasetModuleFactory(_DatasetModuleFactory):
+ """
+ Get the module of a dataset that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+ """
+
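+ # Illustrative usage (a minimal sketch; only succeeds if the hypothetical "username/dataset_name" was cached by a previous load):
+ # factory = CachedDatasetModuleFactory("username/dataset_name")
+ # dataset_module = factory.get_module()  # reuses the most recently cached module, or raises FileNotFoundError
+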
+ def __init__(
+ self,
+ name: str,
+ cache_dir: Optional[str] = None,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.cache_dir = cache_dir
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") <= 1
+
+ def get_module(self) -> DatasetModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if hashes:
+ # get most recent
+ def _get_modification_time(module_hash):
+ return (
+ (Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
+ .stat()
+ .st_mtime
+ )
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ warning_msg = (
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}"
+ )
+ if not config.HF_DATASETS_OFFLINE:
+ warning_msg += ", or remotely on the Hugging Face Hub."
+ logger.warning(warning_msg)
+ # make sure the new module is picked up by the import system
+ module_path = ".".join(
+ [
+ os.path.basename(dynamic_modules_path),
+ "datasets",
+ self.name.replace("/", "--"),
+ hash,
+ self.name.split("/")[-1],
+ ]
+ )
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs)
+ cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
+ cached_datasets_directory_path_root = os.path.join(cache_dir, self.name.replace("/", "___"))
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
+ if os.path.isdir(cached_directory_path)
+ ]
+ if cached_directory_paths:
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": self.name.split("/")[-1],
+ }
+ warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
+ if config.HF_DATASETS_OFFLINE:
+ warning_msg += " (offline mode is enabled)."
+ logger.warning(warning_msg)
+ return DatasetModule(
+ "datasets.packaged_modules.cache.cache",
+ "auto",
+ {**builder_kwargs, "version": "auto"},
+ )
+ raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}")
+
+
+class CachedMetricModuleFactory(_MetricModuleFactory):
+ """
+ Get the module of a metric that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") == 0
+
+ def get_module(self) -> MetricModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "metrics", self.name)
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if not hashes:
+ raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
+ # get most recent
+
+ def _get_modification_time(module_hash):
+ return (Path(importable_directory_path) / module_hash / (self.name + ".py")).stat().st_mtime
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ logger.warning(
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
+ )
+ # make sure the new module is picked up by the import system
+ module_path = ".".join([os.path.basename(dynamic_modules_path), "metrics", self.name, hash, self.name])
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+def dataset_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
+ cache_dir: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ _require_custom_configs=False,
+ **download_kwargs,
+) -> DatasetModule:
+ """
+ Download/extract/cache a dataset module.
+
+ Dataset scripts are cached inside the dynamic modules cache to allow easy imports (and avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the dataset.
+ Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if ``path`` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
+ - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
+ -> load the dataset builder from the dataset script
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
+
+ For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
+
+ - if ``path`` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
+ in which case it's equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
+ data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ DatasetModule
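+
+ Example (illustrative; assumes network access to the Hugging Face Hub):
+
+ ```py
+ >>> from datasets.load import dataset_module_factory, import_main_class
+ >>> dataset_module = dataset_module_factory("squad")
+ >>> builder_cls = import_main_class(dataset_module.module_path)
+ ```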
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+ download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
+
+ # We have several ways to get a dataset builder:
+ #
+ # - if path is the name of a packaged dataset module
+ # -> use the packaged module (json, csv, etc.)
+ #
+ # - if os.path.join(path, name) is a local python file
+ # -> use the module from the python file
+ # - if path is a local directory (but no python file)
+ # -> use a packaged module (csv, text etc.) based on content of the directory
+ #
+ # - if path has one "/" and is dataset repository on the HF hub with a python file
+ # -> the module from the python file in the dataset repository
+ # - if path has one "/" and is dataset repository on the HF hub without a python file
+ # -> use a packaged module (csv, text etc.) based on content of the repository
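+ #
+ # For example (illustrative paths only):
+ # dataset_module_factory("csv", data_files={"train": "train.csv"}) -> PackagedDatasetModuleFactory
+ # dataset_module_factory("./my_dataset/my_dataset.py") -> LocalDatasetModuleFactoryWithScript
+ # dataset_module_factory("./folder_with_data_files") -> LocalDatasetModuleFactoryWithoutScript
+ # dataset_module_factory("username/dataset_name") -> one of the Hub dataset module factories below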
+
+ # Try packaged
+ if path in _PACKAGED_DATASETS_MODULES:
+ return PackagedDatasetModuleFactory(
+ path,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ # Try locally
+ elif path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalDatasetModuleFactoryWithScript(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalDatasetModuleFactoryWithScript(
+ combined_path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ elif os.path.isdir(path):
+ return LocalDatasetModuleFactoryWithoutScript(
+ path, data_dir=data_dir, data_files=data_files, download_mode=download_mode
+ ).get_module()
+ # Try remotely
+ elif is_relative_path(path) and path.count("/") <= 1:
+ try:
+ _raise_if_offline_mode_is_enabled()
+ hf_api = HfApi(config.HF_ENDPOINT)
+ try:
+ dataset_info = hf_api.dataset_info(
+ repo_id=path,
+ revision=revision,
+ token=download_config.token,
+ timeout=100.0,
+ )
+ except Exception as e: # noqa catch any exception of hf_hub and consider that the dataset doesn't exist
+ if isinstance(
+ e,
+ (
+ OfflineModeIsEnabled,
+ requests.exceptions.ConnectTimeout,
+ requests.exceptions.ConnectionError,
+ ),
+ ):
+ raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({type(e).__name__})")
+ elif "404" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ raise DatasetNotFoundError(msg + f" at revision '{revision}'" if revision else msg)
+ elif "401" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ msg = msg + f" at revision '{revision}'" if revision else msg
+ raise DatasetNotFoundError(
+ msg
+ + f". If the dataset is private or gated, make sure to log in with `huggingface-cli login` or visit the dataset page at https://huggingface.co/datasets/{path} to ask for access."
+ )
+ else:
+ raise e
+ if filename in [sibling.rfilename for sibling in dataset_info.siblings]: # contains a dataset script
+ fs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ if _require_custom_configs or (revision and revision != "main"):
+ can_load_config_from_parquet_export = False
+ elif _require_default_config_name:
+ with fs.open(f"datasets/{path}/{filename}", "r", encoding="utf-8") as f:
+ can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
+ else:
+ can_load_config_from_parquet_export = True
+ if config.USE_PARQUET_EXPORT and can_load_config_from_parquet_export:
+ # If the parquet export is ready (parquet files + info available for the current sha), we can use it instead
+ # This fails when the dataset has multiple configs and a default config and
+ # the user didn't specify a configuration name (_require_default_config_name=True).
+ try:
+ return HubDatasetModuleFactoryWithParquetExport(
+ path, download_config=download_config, revision=dataset_info.sha
+ ).get_module()
+ except _datasets_server.DatasetsServerError:
+ pass
+ # Otherwise we must use the dataset script if the user trusts it
+ return HubDatasetModuleFactoryWithScript(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ return HubDatasetModuleFactoryWithoutScript(
+ path,
+ revision=revision,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ except Exception as e1:
+ # All the attempts failed; before raising the error, check if the module is already cached
+ try:
+ return CachedDatasetModuleFactory(
+ path, dynamic_modules_path=dynamic_modules_path, cache_dir=cache_dir
+ ).get_module()
+ except Exception:
+ # If it's not in the cache, then it doesn't exist.
+ if isinstance(e1, OfflineModeIsEnabled):
+ raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
+ if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)):
+ raise e1 from None
+ if isinstance(e1, FileNotFoundError):
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
+ f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
+ ) from None
+ raise e1 from None
+ else:
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory."
+ )
+
+
+@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+def metric_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ **download_kwargs,
+) -> MetricModule:
+ """
+ Download/extract/cache a metric module.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ Metric scripts are cached inside the dynamic modules cache to allow easy imports (and avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the metric script.
+
+ - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
+ -> load the module from the metric script
+ e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
+ - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
+ -> load the module from the metric script in the GitHub repository at huggingface/datasets
+ e.g. ``'accuracy'`` or ``'rouge'``.
+
+ revision (Optional ``Union[str, datasets.Version]``):
+ If specified, the module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ MetricModule
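+
+ Example (illustrative; the 'accuracy' script is fetched from the huggingface/datasets GitHub repository):
+
+ ```py
+ >>> from datasets.load import metric_module_factory
+ >>> metric_module = metric_module_factory('accuracy', trust_remote_code=True)
+ ```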
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
+ # Try locally
+ if path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalMetricModuleFactory(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalMetricModuleFactory(
+ combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
+ ).get_module()
+ elif is_relative_path(path) and path.count("/") == 0:
+ try:
+ return GithubMetricModuleFactory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ except Exception as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
+ try:
+ return CachedMetricModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
+ except Exception: # noqa if it's not in the cache, then it doesn't exist.
+ if not isinstance(e1, FileNotFoundError):
+ raise e1 from None
+ raise FileNotFoundError(
+ f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}. "
+ f"Metric '{path}' doesn't exist on the Hugging Face Hub either."
+ ) from None
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}.")
+
+
+@deprecated("Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate")
+def load_metric(
+ path: str,
+ config_name: Optional[str] = None,
+ process_id: int = 0,
+ num_process: int = 1,
+ cache_dir: Optional[str] = None,
+ experiment_id: Optional[str] = None,
+ keep_in_memory: bool = False,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ trust_remote_code: Optional[bool] = None,
+ **metric_init_kwargs,
+) -> Metric:
+ """Load a `datasets.Metric`.
+
+ Use `evaluate.load` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+ Args:
+
+ path (``str``):
+ path to the metric processing script with the metric builder. Can be either:
+ - a local path to a processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
+ - a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
+ e.g. ``'rouge'`` or ``'bleu'``
+ config_name (:obj:`str`, optional): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
+ process_id (:obj:`int`, optional): for distributed evaluation: id of the process
+ num_process (:obj:`int`, optional): for distributed evaluation: total number of processes
+ cache_dir (Optional str): path to store the temporary predictions and references (default to `~/.cache/huggingface/metrics/`)
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
+ download_config (Optional ``datasets.DownloadConfig``): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
+ at this version. By default, it is set to the local version of the lib. Specifying a version that is different from
+ your local version of the lib might cause compatibility issues.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ Returns:
+ `datasets.Metric`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> accuracy = load_metric('accuracy')
+ >>> accuracy.compute(references=[1, 0], predictions=[1, 1])
+ {'accuracy': 0.5}
+ ```
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ metric_module = metric_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ trust_remote_code=trust_remote_code,
+ ).module_path
+ metric_cls = import_main_class(metric_module, dataset=False)
+ metric = metric_cls(
+ config_name=config_name,
+ process_id=process_id,
+ num_process=num_process,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ experiment_id=experiment_id,
+ **metric_init_kwargs,
+ )
+
+ # Download and prepare resources for the metric
+ metric.download_and_prepare(download_config=download_config)
+
+ return metric
+
+
+def load_dataset_builder(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ **config_kwargs,
+) -> DatasetBuilder:
+ """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
+ without downloading the dataset itself.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features ([`Features`], *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`]
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`DatasetBuilder`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.info.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ if token is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.token = token
+ if storage_options is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.storage_options.update(storage_options)
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=_require_default_config_name,
+ _require_custom_configs=bool(config_kwargs),
+ )
+ # Get dataset builder class from the processing script
+ builder_kwargs = dataset_module.builder_kwargs
+ data_dir = builder_kwargs.pop("data_dir", data_dir)
+ data_files = builder_kwargs.pop("data_files", data_files)
+ config_name = builder_kwargs.pop(
+ "config_name", name or dataset_module.builder_configs_parameters.default_config_name
+ )
+ dataset_name = builder_kwargs.pop("dataset_name", None)
+ info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
+
+ if (
+ path in _PACKAGED_DATASETS_MODULES
+ and data_files is None
+ and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None
+ ):
+ error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
+ example_extensions = [
+ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
+ ]
+ if example_extensions:
+ error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
+ raise ValueError(error_msg)
+
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
+ # Instantiate the dataset builder
+ builder_instance: DatasetBuilder = builder_cls(
+ cache_dir=cache_dir,
+ dataset_name=dataset_name,
+ config_name=config_name,
+ data_dir=data_dir,
+ data_files=data_files,
+ hash=dataset_module.hash,
+ info=info,
+ features=features,
+ token=token,
+ storage_options=storage_options,
+ **builder_kwargs,
+ **config_kwargs,
+ )
+ builder_instance._use_legacy_cache_dir_if_possible(dataset_module)
+
+ return builder_instance
+
+
+def load_dataset(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ split: Optional[Union[str, Split]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ save_infos: bool = False,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ task="deprecated",
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: bool = None,
+ **config_kwargs,
+) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
+ """Load a dataset from the Hugging Face Hub, or a local dataset.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ This function does the following under the hood:
+
+ 1. Download the dataset script from `path` and import it in the library, if it's not already cached inside the library.
+
+ If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
+
+ Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
+ contain the path or URL to the original data files and the code to load examples from the original data files.
+
+ You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
+
+ 2. Run the dataset script which will:
+
+ * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
+ * Process and cache the dataset in typed Arrow tables.
+
+ Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
+ They can be directly accessed from disk, loaded in RAM or even streamed over the web.
+
+ 3. Return a dataset built from the requested splits in `split` (default: all).
+
+ It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
+ In this case, it automatically loads all the data files from the directory or the dataset repository.
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ split (`Split` or `str`):
+ Which split of the data to load.
+ If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
+ If given, will return a single Dataset.
+ Splits can be combined and specified like in tensorflow-datasets.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features (`Features`, *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+ save_infos (`bool`, defaults to `False`):
+ Save the dataset information (checksums/size/splits/...).
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ task (`str`):
+ The task to prepare the dataset for during training and evaluation. Casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+
+ `task` was deprecated in version 2.13.0 and will be removed in 3.0.0.
+
+ streaming (`bool`, defaults to `False`):
+ If set to `True`, don't download the data files. Instead, it streams the data progressively while
+ iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
+
+ Note that streaming works for datasets whose data format supports being iterated over (txt, csv, jsonl, etc.).
+ JSON files may be downloaded completely. Streaming from remote zip or gzip files is supported, but other compressed formats
+ like rar and xz are not yet supported. The tgz format doesn't allow streaming.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the `BuilderConfig`
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - if `split` is not `None`: the dataset requested,
+ - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
+
+ or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
+
+ - if `split` is not `None`, the dataset requested,
+ - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
+
+ Example:
+
+ Load a dataset from the Hugging Face Hub:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train')
+
+ # Map data files to splits
+ >>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
+ >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
+ ```
+
+ Load a local dataset:
+
+ ```py
+ # Load a CSV file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
+
+ # Load a JSON file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
+
+ # Load from a local loading script
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
+ ```
+
+ Load an [`~datasets.IterableDataset`]:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
+ ```
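+
+ Since an [`IterableDataset`] is loaded lazily, a common follow-up (sketched here) is to simply iterate over it:
+
+ ```py
+ >>> for example in ds:
+ ...     print(example)
+ ...     break
+ ```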
+
+ Load an image dataset with the `ImageFolder` dataset builder:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if task != "deprecated":
+ warnings.warn(
+ "'task' was deprecated in version 2.13.0 and will be removed in 3.0.0.\n",
+ FutureWarning,
+ )
+ else:
+ task = None
+ if data_files is not None and not data_files:
+ raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
+ if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
+ raise ValueError(
+ "You are trying to load a dataset that was saved using `save_to_disk`. "
+ "Please use `load_from_disk` instead."
+ )
+
+ if streaming and num_proc is not None:
+ raise NotImplementedError(
+ "Loading a streaming dataset in parallel with `num_proc` is not implemented. "
+ "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
+ )
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(
+ (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
+ )
+
+ # Create a dataset builder
+ builder_instance = load_dataset_builder(
+ path=path,
+ name=name,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ features=features,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ storage_options=storage_options,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=name is None,
+ **config_kwargs,
+ )
+
+ # Return iterable dataset in case of streaming
+ if streaming:
+ return builder_instance.as_streaming_dataset(split=split)
+
+ # Some datasets are already processed on the HF google storage
+ # Don't try downloading from Google storage for the packaged datasets as text, json, csv or pandas
+ try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
+
+ # Download and prepare data
+ builder_instance.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ try_from_hf_gcs=try_from_hf_gcs,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ # Build dataset for splits
+ keep_in_memory = (
+ keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
+ )
+ ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
+ # Rename and cast features to match task schema
+ if task is not None:
+ # To avoid issuing the same warning twice
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", FutureWarning)
+ ds = ds.prepare_for_task(task)
+ if save_infos:
+ builder_instance._save_infos()
+
+ return ds
+
+
+def load_from_disk(
+ dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None
+) -> Union[Dataset, DatasetDict]:
+ """
+ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or
+ from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be
+ loaded from.
+ fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory: the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
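+
+ Loading from a remote URI works the same way, for example (assuming an S3 bucket reachable through `s3fs` with anonymous access):
+
+ ```py
+ >>> ds = load_from_disk('s3://my-bucket/dataset/train', storage_options={"anon": True})
+ ```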
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+ if not fs.exists(dataset_path):
+ raise FileNotFoundError(f"Directory {dataset_path} not found")
+ if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ ):
+ return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)):
+ return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ else:
+ raise FileNotFoundError(
+ f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory."
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/metric.py b/env-llmeval/lib/python3.10/site-packages/datasets/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..187c5e5c925b71b26ca83021523dd55c28989d28
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/metric.py
@@ -0,0 +1,652 @@
+# Copyright 2020 The HuggingFace Datasets Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Metrics base class."""
+
+import os
+import types
+import uuid
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+from filelock import BaseFileLock, Timeout
+
+from . import config
+from .arrow_dataset import Dataset
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager
+from .features import Features
+from .info import DatasetInfo, MetricInfo
+from .naming import camelcase_to_snakecase
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+from .utils.py_utils import copyfunc, temp_seed
+
+
+logger = get_logger(__name__)
+
+
+class FileFreeLock(BaseFileLock):
+ """Thread lock until a file **cannot** be locked"""
+
+ def __init__(self, lock_file, *args, **kwargs):
+ self.filelock = FileLock(lock_file)
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
+
+ def _acquire(self):
+ try:
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
+ except Timeout:
+ # We couldn't acquire the lock, the file is locked!
+ self._context.lock_file_fd = self.filelock.lock_file
+ else:
+ # We were able to acquire the lock, the file is not yet locked!
+ self.filelock.release()
+ self._context.lock_file_fd = None
+
+ def _release(self):
+ self._context.lock_file_fd = None
+
+
+# lists - summarize long lists similarly to NumPy
+# arrays/tensors - let the frameworks control formatting
+def summarize_if_long_list(obj):
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
+ return f"{obj}"
+
+ def format_chunk(chunk):
+ return ", ".join(repr(x) for x in chunk)
+
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
+
+
+class MetricInfoMixin:
+ """This base class exposes some attributes of MetricInfo
+ at the base level of the Metric for easy access.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ """
+
+ def __init__(self, info: MetricInfo):
+ self._metric_info = info
+
+ @property
+ def info(self):
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
+ return self._metric_info
+
+ @property
+ def name(self) -> str:
+ return self._metric_info.metric_name
+
+ @property
+ def experiment_id(self) -> Optional[str]:
+ return self._metric_info.experiment_id
+
+ @property
+ def description(self) -> str:
+ return self._metric_info.description
+
+ @property
+ def citation(self) -> str:
+ return self._metric_info.citation
+
+ @property
+ def features(self) -> Features:
+ return self._metric_info.features
+
+ @property
+ def inputs_description(self) -> str:
+ return self._metric_info.inputs_description
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._metric_info.homepage
+
+ @property
+ def license(self) -> str:
+ return self._metric_info.license
+
+ @property
+ def codebase_urls(self) -> Optional[List[str]]:
+ return self._metric_info.codebase_urls
+
+ @property
+ def reference_urls(self) -> Optional[List[str]]:
+ return self._metric_info.reference_urls
+
+ @property
+ def streamable(self) -> bool:
+ return self._metric_info.streamable
+
+ @property
+ def format(self) -> Optional[str]:
+ return self._metric_info.format
+
+
+class Metric(MetricInfoMixin):
+ """A Metric is the base class and common API for all metrics.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ Args:
+ config_name (``str``): This is used to define a hash specific to a metric computation script and prevents the metric's data
+ from being overridden when the metric loading script is modified.
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
+ The data directory should be located on a shared file-system in distributed setups.
+ num_process (``int``): specify the total number of nodes in a distributed setting.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
+ timeout (``Union[int, float]``): Timeout in seconds for distributed-setting synchronization.
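+
+ Example (a sketch of distributed usage, where `rank`, `world_size`, `model_predictions` and `labels` are assumed to come from your own training loop):
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy", num_process=world_size, process_id=rank, experiment_id="my_experiment")
+ >>> metric.add_batch(predictions=model_predictions, references=labels)
+ >>> score = metric.compute()  # dict on the main process (process_id == 0), None on the others
+ ```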
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ config_name: Optional[str] = None,
+ keep_in_memory: bool = False,
+ cache_dir: Optional[str] = None,
+ num_process: int = 1,
+ process_id: int = 0,
+ seed: Optional[int] = None,
+ experiment_id: Optional[str] = None,
+ max_concurrent_cache_files: int = 10000,
+ timeout: Union[int, float] = 100,
+ **kwargs,
+ ):
+ # prepare info
+ self.config_name = config_name or "default"
+ info = self._info()
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
+ info.config_name = self.config_name
+ info.experiment_id = experiment_id or "default_experiment"
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
+
+ # Safety checks on num_process and process_id
+ if not isinstance(process_id, int) or process_id < 0:
+ raise ValueError("'process_id' should be a number greater than 0")
+ if not isinstance(num_process, int) or num_process <= process_id:
+ raise ValueError("'num_process' should be a number greater than process_id")
+ if keep_in_memory and num_process != 1:
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
+
+ self.num_process = num_process
+ self.process_id = process_id
+ self.max_concurrent_cache_files = max_concurrent_cache_files
+
+ self.keep_in_memory = keep_in_memory
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
+ self.data_dir = self._build_data_dir()
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
+ else:
+ self.seed: int = seed
+ self.timeout: Union[int, float] = timeout
+
+ # Update 'compute' and 'add' docstring
+ # methods need to be copied otherwise it changes the docstrings of every instance
+ self.compute = types.MethodType(copyfunc(self.compute), self)
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
+ self.add = types.MethodType(copyfunc(self.add), self)
+ self.compute.__func__.__doc__ += self.info.inputs_description
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
+ self.add.__func__.__doc__ += self.info.inputs_description
+
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
+ self.buf_writer = None
+ self.writer = None
+ self.writer_batch_size = None
+ self.data = None
+
+ # This is the cache file we store our predictions/references in
+ # Keep it None for now so we can (cloud)pickle the object
+ self.cache_file_name = None
+ self.filelock = None
+ self.rendez_vous_lock = None
+
+ # This is all the cache files on which we have a lock when we are in a distributed setting
+ self.file_paths = None
+ self.filelocks = None
+
+ def __len__(self):
+ """Return the number of examples (predictions or predictions/references pair)
+ currently stored in the metric's cache.
+ """
+ return 0 if self.writer is None else len(self.writer)
+
+ def __repr__(self):
+ return (
+ f'Metric(name: "{self.name}", features: {self.features}, '
+ f'usage: """{self.inputs_description}""", '
+ f"stored examples: {len(self)})"
+ )
+
+ def _build_data_dir(self):
+ """Path of this metric in cache_dir:
+ Will be:
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
+ If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ builder_data_dir = self._data_dir_root
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
+ os.makedirs(builder_data_dir, exist_ok=True)
+ return builder_data_dir
+
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
+ """Create a new cache file. If the default cache file is used, we generated a new hash."""
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
+ filelock = None
+ for i in range(self.max_concurrent_cache_files):
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=timeout)
+ except Timeout:
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup),
+ # We raise an error
+ if self.num_process != 1:
+ raise ValueError(
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+ if i == self.max_concurrent_cache_files - 1:
+ raise ValueError(
+ f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
+ f"(current value is {self.max_concurrent_cache_files})."
+ ) from None
+ # In other cases (allowed to find a new file name + not yet at max num of attempts) we try to sample a new file name with a random uuid.
+ file_uuid = str(uuid.uuid4())
+ file_path = os.path.join(
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
+ )
+ else:
+ break
+
+ return file_path, filelock
+
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
+ """Get a lock on all the cache files in a distributed setup.
+ We wait for `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
+ """
+ if self.num_process == 1:
+ if self.cache_file_name is None:
+ raise ValueError(
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
+ "at least once before calling `compute`."
+ )
+ file_paths = [self.cache_file_name]
+ else:
+ file_paths = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
+ for process_id in range(self.num_process)
+ ]
+
+ # Let's acquire a lock on each process files to be sure they are finished writing
+ filelocks = []
+ for process_id, file_path in enumerate(file_paths):
+ if process_id == 0: # process 0 already has its lock file
+ filelocks.append(self.filelock)
+ else:
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
+ ) from None
+ else:
+ filelocks.append(filelock)
+
+ return file_paths, filelocks
+
+ def _check_all_processes_locks(self):
+ expected_lock_file_names = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
+ for process_id in range(self.num_process)
+ ]
+ for expected_lock_file_name in expected_lock_file_names:
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+
+ def _check_rendez_vous(self):
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ rendez_vous_lock = FileLock(lock_file_name)
+ try:
+ rendez_vous_lock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
+ else:
+ rendez_vous_lock.release()
+
+ def _finalize(self):
+ """Close all the writing process and load/gather the data
+ from all the nodes if main node or all_process is True.
+ """
+ if self.writer is not None:
+ self.writer.finalize()
+ self.writer = None
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
+ if self.filelock is not None and self.process_id > 0:
+ self.filelock.release()
+
+ if self.keep_in_memory:
+ # Read the predictions and references
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
+
+ elif self.process_id == 0:
+ # Let's acquire a lock on each node files to be sure they are finished writing
+ file_paths, filelocks = self._get_all_cache_files()
+
+ # Read the predictions and references
+ try:
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
+ except FileNotFoundError:
+ raise ValueError(
+ "Error in finalize: another metric instance is already using the local cache file. "
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
+ ) from None
+
+ # Store file paths and locks and we will release/delete them after the computation.
+ self.file_paths = file_paths
+ self.filelocks = filelocks
+
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
+ """Compute the metrics.
+
+ Usage of positional arguments is not allowed to prevent mistakes.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+ **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
+ method (see details in the docstring).
+
+ Return:
+ dict or None
+
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
+ - None if the metric is not run on the main process (``process_id != 0``).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
+ ```
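+
+ Metrics can also expose additional parameters of their `_compute` method as keyword arguments; for example
+ (a sketch assuming the `f1` metric script, which accepts an `average` parameter):
+
+ ```py
+ >>> metric = load_metric("f1")
+ >>> f1_score = metric.compute(predictions=model_prediction, references=labels, average="macro")
+ ```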
+ """
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
+ if predictions is None and references is None:
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
+ all_kwargs.update(missing_kwargs)
+ else:
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
+ if missing_inputs:
+ raise ValueError(
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
+ )
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
+
+ if any(v is not None for v in inputs.values()):
+ self.add_batch(**inputs)
+ self._finalize()
+
+ self.cache_file_name = None
+ self.filelock = None
+
+ if self.process_id == 0:
+ self.data.set_format(type=self.info.format)
+
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
+ with temp_seed(self.seed):
+ output = self._compute(**inputs, **compute_kwargs)
+
+ if self.buf_writer is not None:
+ self.buf_writer = None
+ del self.data
+ self.data = None
+ else:
+ # Release locks and delete all the cache files. Process 0 is released last.
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
+ logger.info(f"Removing {file_path}")
+ del self.data
+ self.data = None
+ del self.writer
+ self.writer = None
+ os.remove(file_path)
+ filelock.release()
+
+ return output
+ else:
+ return None
+
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
+ """Add a batch of predictions and references for the metric's stack.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ batch = {"predictions": predictions, "references": references, **kwargs}
+ batch = {input_name: batch[input_name] for input_name in self.features}
+ batch = self.info.features.encode_batch(batch)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write_batch(batch)
+ except pa.ArrowInvalid:
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
+ col0 = next(iter(batch))
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
+ error_msg = (
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
+ )
+ elif sorted(self.features) != ["references", "predictions"]:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ else:
+ error_msg = (
+ f"Predictions and/or references don't match the expected format.\n"
+ f"Expected format: {self.features},\n"
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
+ f"Input references: {summarize_if_long_list(references)}"
+ )
+ raise ValueError(error_msg) from None
+
+ def add(self, *, prediction=None, reference=None, **kwargs):
+ """Add one prediction and reference for the metric's stack.
+
+ Args:
+ prediction (list/array/tensor, optional): Predictions.
+ reference (list/array/tensor, optional): References.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add(prediction=model_prediction, reference=label)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ example = {"predictions": prediction, "references": reference, **kwargs}
+ example = {input_name: example[input_name] for input_name in self.features}
+ example = self.info.features.encode_example(example)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write(example)
+ except pa.ArrowInvalid:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ raise ValueError(error_msg) from None
+
+ def _init_writer(self, timeout=1):
+ if self.num_process > 1:
+ if self.process_id == 0:
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ self.rendez_vous_lock = FileLock(file_path)
+ try:
+ self.rendez_vous_lock.acquire(timeout=timeout)
+ except TimeoutError:
+ raise ValueError(
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+
+ if self.keep_in_memory:
+ self.buf_writer = pa.BufferOutputStream()
+ self.writer = ArrowWriter(
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
+ )
+ else:
+ self.buf_writer = None
+
+ # Get cache file name and lock it
+ if self.cache_file_name is None or self.filelock is None:
+ cache_file_name, filelock = self._create_cache_file() # get ready
+ self.cache_file_name = cache_file_name
+ self.filelock = filelock
+
+ self.writer = ArrowWriter(
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
+ )
+ # Setup rendez-vous here if in a distributed setting: wait for everyone to be ready before writing
+ if self.num_process > 1:
+ if self.process_id == 0:
+ self._check_all_processes_locks() # wait for everyone to be ready
+ self.rendez_vous_lock.release() # let everyone go
+ else:
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
+
+ def _info(self) -> MetricInfo:
+ """Construct the MetricInfo object. See `MetricInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (MetricInfo) The metrics information
+ """
+ raise NotImplementedError
+
+ def download_and_prepare(
+ self,
+ download_config: Optional[DownloadConfig] = None,
+ dl_manager: Optional[DownloadManager] = None,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
+ """
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig()
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
+ download_config.force_download = False
+
+ dl_manager = DownloadManager(
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
+ )
+
+ self._download_and_prepare(dl_manager)
+
+ def _download_and_prepare(self, dl_manager):
+ """Downloads and prepares resources for the metric.
+
+ This is the internal implementation to overwrite called when user calls
+ `download_and_prepare`. It should download all required resources for the metric.
+
+ Args:
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
+ """
+ return None
+
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
+ """This method defines the common API for all the metrics in the library"""
+ raise NotImplementedError
+
+ def __del__(self):
+ if hasattr(self, "filelock") and self.filelock is not None:
+ self.filelock.release()
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
+ self.rendez_vous_lock.release()
+ if hasattr(self, "writer"): # in case it was already deleted
+ del self.writer
+ if hasattr(self, "data"): # in case it was already deleted
+ del self.data
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/naming.py b/env-llmeval/lib/python3.10/site-packages/datasets/naming.py
new file mode 100644
index 0000000000000000000000000000000000000000..65e7ede10dcde8701823223ae98e7971f705f945
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/naming.py
@@ -0,0 +1,84 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Utilities for file names."""
+
+import itertools
+import os
+import re
+
+
+_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
+_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
+
+_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
+_multiple_underscores_re = re.compile(r"(_{2,})")
+
+_split_re = r"^\w+(\.\w+)*$"
+
+INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
+
+
+def camelcase_to_snakecase(name):
+ """Convert camel-case string to snake-case."""
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
+ return name.lower()
+
+
+def snakecase_to_camelcase(name):
+ """Convert snake-case string to camel-case string."""
+ name = _single_underscore_re.split(name)
+ name = [_multiple_underscores_re.split(n) for n in name]
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
+
+
+def filename_prefix_for_name(name):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ return camelcase_to_snakecase(name)
+
+
+def filename_prefix_for_split(name, split):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
+ return f"{filename_prefix_for_name(name)}-{split}"
+
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ if filetype_suffix:
+ prefix += f".{filetype_suffix}"
+ filepath = os.path.join(data_dir, prefix)
+ return f"{filepath}*"
+
+
+def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
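+ """Return the file name(s) for a dataset split, one file per shard if `shard_lengths` is given.
+
+ For example (illustrative values), `filenames_for_dataset_split("/cache", "squad", "train", "arrow", shard_lengths=[3, 4])`
+ returns `["/cache/squad-train-00000-of-00002.arrow", "/cache/squad-train-00001-of-00002.arrow"]`.
+ """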
+ prefix = filename_prefix_for_split(dataset_name, split)
+ prefix = os.path.join(path, prefix)
+
+ if shard_lengths:
+ num_shards = len(shard_lengths)
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
+ if filetype_suffix:
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
+ return filenames
+ else:
+ filename = prefix
+ if filetype_suffix:
+ filename += f".{filetype_suffix}"
+ return [filename]
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/search.py b/env-llmeval/lib/python3.10/site-packages/datasets/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec41bbc3e00c34d6d10e75ea05264caabc3256e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/search.py
@@ -0,0 +1,779 @@
+import importlib.util
+import os
+import tempfile
+from pathlib import PurePath
+from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union
+
+import fsspec
+import numpy as np
+
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+
+
+if TYPE_CHECKING:
+ from .arrow_dataset import Dataset # noqa: F401
+
+ try:
+ from elasticsearch import Elasticsearch # noqa: F401
+
+ except ImportError:
+ pass
+ try:
+ import faiss # noqa: F401
+
+ except ImportError:
+ pass
+
+_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None
+_has_faiss = importlib.util.find_spec("faiss") is not None
+
+
+logger = logging.get_logger(__name__)
+
+
+class MissingIndex(Exception):
+ pass
+
+
+class SearchResults(NamedTuple):
+ scores: List[float]
+ indices: List[int]
+
+
+class BatchedSearchResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_indices: List[List[int]]
+
+
+class NearestExamplesResults(NamedTuple):
+ scores: List[float]
+ examples: dict
+
+
+class BatchedNearestExamplesResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_examples: List[dict]
+
+
+class BaseIndex:
+ """Base class for indexing"""
+
+ def search(self, query, k: int = 10, **kwargs) -> SearchResults:
+ """
+ To implement.
+ This method has to return the scores and the indices of the retrieved examples given a certain query.
+ """
+ raise NotImplementedError
+
+ def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
+ k (`int`): The number of examples to retrieve per query.
+
+ Output:
+ total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ total_scores, total_indices = [], []
+ for query in queries:
+ scores, indices = self.search(query, k)
+ total_scores.append(scores)
+ total_indices.append(indices)
+ return BatchedSearchResults(total_scores, total_indices)
+
+ def save(self, file: Union[str, PurePath]):
+ """Serialize the index on disk"""
+ raise NotImplementedError
+
+ @classmethod
+ def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
+ """Deserialize the index from disk"""
+ raise NotImplementedError
+
+
+class ElasticSearchIndex(BaseIndex):
+ """
+ Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
+ An Elasticsearch server needs to be accessible, and a python client is declared with
+ ```
+ es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
+ ```
+ for example.
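+
+ A minimal usage sketch (assuming a running Elasticsearch server at `localhost:9200`):
+
+ ```py
+ >>> index = ElasticSearchIndex(host="localhost", port=9200)
+ >>> index.add_documents(["foo bar", "bar is the best"])
+ >>> scores, ids = index.search("bar", k=2)
+ ```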
+ """
+
+ def __init__(
+ self,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ if not _has_elasticsearch:
+ raise ImportError(
+ "You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
+ )
+ if es_client is not None and (host is not None or port is not None):
+ raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
+ host = host or "localhost"
+ port = port or 9200
+
+ import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
+ from elasticsearch import Elasticsearch # noqa: F811
+
+ self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
+ self.es_index_name = (
+ es_index_name
+ if es_index_name is not None
+ else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
+ )
+ self.es_index_config = (
+ es_index_config
+ if es_index_config is not None
+ else {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
+ }
+ )
+
+ def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
+ """
+ Add documents to the index.
+ If the documents are inside a certain column, you can specify it using the `column` argument.
+ """
+ index_name = self.es_index_name
+ index_config = self.es_index_config
+ self.es_client.indices.create(index=index_name, body=index_config)
+ number_of_docs = len(documents)
+ progress = hf_tqdm(unit="docs", total=number_of_docs)
+ successes = 0
+
+ def passage_generator():
+ if column is not None:
+ for i, example in enumerate(documents):
+ yield {"text": example[column], "_id": i}
+ else:
+ for i, example in enumerate(documents):
+ yield {"text": example, "_id": i}
+
+ # index the documents with streaming bulk requests
+ import elasticsearch as es
+
+ for ok, action in es.helpers.streaming_bulk(
+ client=self.es_client,
+ index=index_name,
+ actions=passage_generator(),
+ ):
+ progress.update(1)
+ successes += ok
+ if successes != len(documents):
+ logger.warning(
+ f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}"
+ )
+ logger.info(f"Indexed {successes:d} documents")
+
+ def search(self, query: str, k=10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ query (`str`): The query as a string.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ scores (`List[float]`): The retrieval scores of the retrieved examples.
+ indices (`List[int]`): The indices of the retrieved examples.
+ """
+ response = self.es_client.search(
+ index=self.es_index_name,
+ body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
+ **kwargs,
+ )
+ hits = response["hits"]["hits"]
+ return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
+
+ def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
+ import concurrent.futures
+
+ total_scores, total_indices = [None] * len(queries), [None] * len(queries)
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
+ for future in concurrent.futures.as_completed(future_to_index):
+ index = future_to_index[future]
+ results: SearchResults = future.result()
+ total_scores[index] = results.scores
+ total_indices[index] = results.indices
+ return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
+
+
+class FaissIndex(BaseIndex):
+ """
+ Dense index using Faiss. It is used to index vectors.
+ Faiss is a library for efficient similarity search and clustering of dense vectors.
+ It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
+ You can find more information about Faiss here:
+ - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
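+
+ A minimal usage sketch (assuming `faiss` is installed; a flat, exact index is built by default):
+
+ ```py
+ >>> import numpy as np
+ >>> index = FaissIndex()
+ >>> index.add_vectors(np.random.rand(100, 8).astype(np.float32))
+ >>> scores, ids = index.search(np.random.rand(8).astype(np.float32), k=5)
+ ```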
+ """
+
+ def __init__(
+ self,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ ):
+ """
+ Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ """
+ if string_factory is not None and custom_index is not None:
+ raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
+ if device is not None and custom_index is not None:
+ raise ValueError(
+ "Cannot pass both 'custom_index' and 'device'. "
+ "Pass 'custom_index' already transferred to the target device instead."
+ )
+ self.device = device
+ self.string_factory = string_factory
+ self.metric_type = metric_type
+ self.faiss_index = custom_index
+ if not _has_faiss:
+ raise ImportError(
+ "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
+ "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
+ "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
+ )
+
+ def add_vectors(
+ self,
+ vectors: Union[np.array, "Dataset"],
+ column: Optional[str] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: Optional[bool] = None,
+ ):
+ """
+ Add vectors to the index.
+ If the arrays are inside a certain column, you can specify it using the `column` argument.
+ """
+ import faiss # noqa: F811
+
+ # Create index
+ if self.faiss_index is None:
+ size = len(vectors[0]) if column is None else len(vectors[0][column])
+ if self.string_factory is not None:
+ if self.metric_type is None:
+ index = faiss.index_factory(size, self.string_factory)
+ else:
+ index = faiss.index_factory(size, self.string_factory, self.metric_type)
+ else:
+ if self.metric_type is None:
+ index = faiss.IndexFlat(size)
+ else:
+ index = faiss.IndexFlat(size, self.metric_type)
+
+ self.faiss_index = self._faiss_index_to_device(index, self.device)
+ logger.info(f"Created faiss index of type {type(self.faiss_index)}")
+
+ # Set verbosity level
+ if faiss_verbose is not None:
+ self.faiss_index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
+ self.faiss_index.index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
+ self.faiss_index.quantizer.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
+ self.faiss_index.clustering_index.verbose = faiss_verbose
+
+ # Train
+ if train_size is not None:
+ train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
+ logger.info(f"Training the index with the first {len(train_vecs)} vectors")
+ self.faiss_index.train(train_vecs)
+ else:
+ logger.info("Ignored the training step of the faiss index as `train_size` is None.")
+
+ # Add vectors
+ logger.info(f"Adding {len(vectors)} vectors to the faiss index")
+ for i in hf_tqdm(range(0, len(vectors), batch_size)):
+ vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
+ self.faiss_index.add(vecs)
+
+ @staticmethod
+ def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
+ """
+ Sends a faiss index to a device.
+ A device can be a positive integer (GPU id), a negative integer (all GPUs),
+ a list of positive integers (the GPUs to use), or `None` for CPU.
+ """
+
+ # If device is not specified, then it runs on CPU.
+ if device is None:
+ return index
+
+ import faiss # noqa: F811
+
+ # If the device id is given as an integer
+ if isinstance(device, int):
+ # Positive integers are directly mapped to GPU ids
+ if device > -1:
+ faiss_res = faiss.StandardGpuResources()
+ index = faiss.index_cpu_to_gpu(faiss_res, device, index)
+ # And negative integers mean using all GPUs
+ else:
+ index = faiss.index_cpu_to_all_gpus(index)
+ # Device ids given as a list mean mapping to those devices specified.
+ elif isinstance(device, (list, tuple)):
+ index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
+ else:
+ raise TypeError(
+ f"The argument type: {type(device)} is not expected. "
+ + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
+ )
+
+ return index
+
+ def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ query (`np.array`): The query as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ scores (`List[float]`): The retrieval scores of the retrieved examples.
+ indices (`List[int]`): The indices of the retrieved examples.
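+
+ Example (illustrative sketch; assumes 32-dimensional vectors were already added to this index with `add_vectors`):
+
+ ```py
+ >>> query = np.random.randn(32).astype(np.float32)  # placeholder query vector
+ >>> scores, indices = index.search(query, k=5)
+ ```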
+ """
+ if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
+ raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
+
+ queries = query.reshape(1, -1)
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return SearchResults(scores[0], indices[0].astype(int))
+
+ def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
+ """Find the nearest examples indices to the queries.
+
+ Args:
+ queries (`np.array`): The queries as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ if len(queries.shape) != 2:
+ raise ValueError("Shape of query must be 2D")
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return BatchedSearchResults(scores, indices.astype(int))
+
+ def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Serialize the FaissIndex on disk"""
+ import faiss # noqa: F811
+
+ if self.device is not None and isinstance(self.device, (int, list, tuple)):
+ index = faiss.index_gpu_to_cpu(self.faiss_index)
+ else:
+ index = self.faiss_index
+
+ with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
+ faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
+
+ @classmethod
+ def load(
+ cls,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ) -> "FaissIndex":
+ """Deserialize the FaissIndex from disk"""
+ import faiss # noqa: F811
+
+ # An instance of FaissIndex is essentially just a wrapper around a faiss index.
+ faiss_index = cls(device=device)
+ with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
+ index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
+ faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
+ return faiss_index
+
+
+class IndexableMixin:
+ """Add indexing features to `datasets.Dataset`"""
+
+ def __init__(self):
+ self._indexes: Dict[str, BaseIndex] = {}
+
+ def __len__(self):
+ raise NotImplementedError
+
+ def __getitem__(self, key):
+ raise NotImplementedError
+
+ def is_index_initialized(self, index_name: str) -> bool:
+ return index_name in self._indexes
+
+ def _check_index_is_initialized(self, index_name: str):
+ if not self.is_index_initialized(index_name):
+ raise MissingIndex(
+ f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
+ )
+
+ def list_indexes(self) -> List[str]:
+ """List the `colindex_nameumns`/identifiers of all the attached indexes."""
+ return list(self._indexes)
+
+ def get_index(self, index_name: str) -> BaseIndex:
+ """List the `index_name`/identifiers of all the attached indexes.
+
+ Args:
+ index_name (`str`): Index name.
+
+ Returns:
+ [`BaseIndex`]
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name]
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ column (`str`): The column of the vectors to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat` (L2 metric).
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
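+
+ Example (illustrative sketch; `ds` stands for a `Dataset` whose `embeddings` column holds `float32` vectors, and `query_embedding` for a matching query vector):
+
+ ```py
+ >>> ds.add_faiss_index(column="embeddings")
+ >>> scores, retrieved_examples = ds.get_nearest_examples("embeddings", query_embedding, k=10)
+ ```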
+ """
+ index_name = index_name if index_name is not None else column
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is `IndexFlat` (L2 metric).
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
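+
+ Example (illustrative sketch; the random array only illustrates the expected `(num_rows, dim)` shape, and the index name is a placeholder):
+
+ ```py
+ >>> import numpy as np
+ >>> external_embeddings = np.random.randn(len(ds), 32).astype(np.float32)
+ >>> ds.add_faiss_index_from_external_arrays(external_arrays=external_embeddings, index_name="embeddings")
+ ```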
+ """
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Save a FaissIndex on disk.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
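+
+ Example (illustrative sketch; the column name and file path are placeholders):
+
+ ```py
+ >>> ds.add_faiss_index(column="embeddings")
+ >>> ds.save_faiss_index("embeddings", "my_index.faiss")
+ >>> ds.load_faiss_index("embeddings", "my_index.faiss")
+ ```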
+ """
+ index = self.get_index(index_name)
+ if not isinstance(index, FaissIndex):
+ raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
+ index.save(file, storage_options=storage_options)
+ logger.info(f"Saved FaissIndex {index_name} at {file}")
+
+ def load_faiss_index(
+ self,
+ index_name: str,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ):
+ """Load a FaissIndex from disk.
+
+ If you want to do additional configuration, you can access the faiss index object with
+ `.get_index(index_name).faiss_index` and adjust it to fit your needs.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
+ call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+ """
+ index = FaissIndex.load(file, device=device, storage_options=storage_options)
+ if index.faiss_index.ntotal != len(self):
+ raise ValueError(
+ f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
+ )
+ self._indexes[index_name] = index
+ logger.info(f"Loaded FaissIndex {index_name} from {file}")
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval.
+
+ Args:
+ column (`str`): The column of the documents to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ host (Optional `str`, defaults to localhost):
+ host of where ElasticSearch is running
+ port (Optional `int`, defaults to 9200):
+ port of where ElasticSearch is running
+ es_client (Optional `elasticsearch.Elasticsearch`):
+ The elasticsearch client used to create the index if host and port are None.
+ es_index_name (Optional `str`): The elasticsearch index name used to create the index.
+ es_index_config (Optional `dict`):
+ The configuration of the elasticsearch index.
+ Default config is:
+
+ Config::
+
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
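+
+ Example (illustrative sketch; assumes an ElasticSearch server reachable on `localhost:9200` and a dataset `ds` with a `text` column):
+
+ ```py
+ >>> ds.add_elasticsearch_index(column="text", host="localhost", port=9200)
+ >>> scores, retrieved_examples = ds.get_nearest_examples("text", "machine learning", k=10)
+ ```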
+ """
+ index_name = index_name if index_name is not None else column
+ es_index = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+ es_index.add_documents(self, column=column)
+ self._indexes[index_name] = es_index
+
+ def load_elasticsearch_index(
+ self,
+ index_name: str,
+ es_index_name: str,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Load an existing text index using ElasticSearch for fast retrieval.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
+ es_index_name (`str`):
+ The name of elasticsearch index to load.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host of where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port of where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
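+
+ Example (illustrative sketch; the index names, host and port are placeholders and assume the elasticsearch index already exists):
+
+ ```py
+ >>> ds.load_elasticsearch_index("text", es_index_name="my_es_index", host="localhost", port=9200)
+ >>> scores, retrieved_examples = ds.get_nearest_examples("text", "my query", k=10)
+ ```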
+ """
+ self._indexes[index_name] = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+
+ def drop_index(self, index_name: str):
+ """Drop the index with the specified column.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ """
+ del self._indexes[index_name]
+
+ def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, indices)`:
+ A tuple of `(scores, indices)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **indices** (`List[int]`): the indices of the retrieved examples
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search(query, k, **kwargs)
+
+ def search_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedSearchResults:
+ """Find the nearest examples indices in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_indices)`:
+ A tuple of `(total_scores, total_indices)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search_batch(queries, k, **kwargs)
+
+ def get_nearest_examples(
+ self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
+ ) -> NearestExamplesResults:
+ """Find the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The index_name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, examples)`:
+ A tuple of `(scores, examples)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **examples** (`dict`): the retrieved examples
+ """
+ self._check_index_is_initialized(index_name)
+ scores, indices = self.search(index_name, query, k, **kwargs)
+ top_indices = [i for i in indices if i >= 0]
+ return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
+
+ def get_nearest_examples_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedNearestExamplesResults:
+ """Find the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_examples)`:
+ A tuple of `(total_scores, total_examples)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_examples** (`List[dict]`): the retrieved examples per query
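+
+ Example (illustrative sketch; assumes a faiss index named "embeddings" was added and `queries` is a 2D `float32` array with one row per query):
+
+ ```py
+ >>> total_scores, total_examples = ds.get_nearest_examples_batch("embeddings", queries, k=5)
+ >>> len(total_examples) == len(queries)
+ True
+ ```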
+ """
+ self._check_index_is_initialized(index_name)
+ total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
+ total_scores = [
+ scores_i[: len([i for i in indices_i if i >= 0])]
+ for scores_i, indices_i in zip(total_scores, total_indices)
+ ]
+ total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
+ return BatchedNearestExamplesResults(total_scores, total_samples)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/splits.py b/env-llmeval/lib/python3.10/site-packages/datasets/splits.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd4966cb4007adc9f47fd78cf2b0a1732913aaef
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/splits.py
@@ -0,0 +1,635 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Splits related API."""
+
+import abc
+import collections
+import copy
+import dataclasses
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Union
+
+from .arrow_reader import FileInstructions, make_file_instructions
+from .naming import _split_re
+from .utils.py_utils import NonMutableDict, asdict
+
+
+@dataclass
+class SplitInfo:
+ name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
+ num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ shard_lengths: Optional[List[int]] = None
+
+ # Deprecated
+ # For backward compatibility, this field needs to always be included in files like
+ # dataset_infos.json and dataset_info.json files
+ # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
+ dataset_name: Optional[str] = dataclasses.field(
+ default=None, metadata={"include_in_asdict_even_if_is_default": True}
+ )
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ # `self.dataset_name` is assigned in `SplitDict.add()`.
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=[self],
+ instruction=str(self.name),
+ )
+ return instructions.file_instructions
+
+
+@dataclass
+class SubSplitInfo:
+ """Wrapper around a sub split info.
+ This class exposes info on the subsplit:
+ ```
+ ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
+ info.splits['train[75%:]'].num_examples
+ ```
+ """
+
+ instructions: FileInstructions
+
+ @property
+ def num_examples(self):
+ """Returns the number of example in the subsplit."""
+ return self.instructions.num_examples
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ return self.instructions.file_instructions
+
+
+class SplitBase(metaclass=abc.ABCMeta):
+ # pylint: disable=line-too-long
+ """Abstract base class for Split compositionality.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+
+ There are three parts to the composition:
+ 1) The splits are composed (defined, merged, split,...) together before
+ calling the `.as_dataset()` function. This is done with `__add__` and
+ `__getitem__`, which return a tree of `SplitBase` (whose leaves
+ are the `NamedSplit` objects)
+
+ ```
+ split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
+ ```
+
+ 2) The `SplitBase` is forwarded to the `.as_dataset()` function
+ to be resolved into an actual read instruction. This is done by the
+ `.get_read_instruction()` method which takes the real dataset splits
+ (name, number of shards,...) and parses the tree to return a
+ `SplitReadInstruction()` object
+
+ ```
+ read_instruction = split.get_read_instruction(self.info.splits)
+ ```
+
+ 3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
+ to define which files to read and how to skip examples within each file.
+
+ """
+
+ # pylint: enable=line-too-long
+
+ @abc.abstractmethod
+ def get_read_instruction(self, split_dict):
+ """Parse the descriptor tree and compile all read instructions together.
+
+ Args:
+ split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
+
+ Returns:
+ split_read_instruction: `SplitReadInstruction`
+ """
+ raise NotImplementedError("Abstract method")
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, (NamedSplit, str)):
+ return False
+ raise NotImplementedError("Equality is not implemented between merged/sub splits.")
+
+ def __ne__(self, other):
+ """InEquality: datasets.Split.TRAIN != 'test'."""
+ return not self.__eq__(other)
+
+ def __add__(self, other):
+ """Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
+ return _SplitMerged(self, other)
+
+ def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
+ """Divides this split into subsplits.
+
+ There are 3 ways to define subsplits, which correspond to the 3
+ arguments `k` (get `k` even subsplits), `percent` (get a slice of the
+ dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
+ specified by `weighted`).
+
+ Example::
+
+ ```
+ # 50% train, 50% test
+ train, test = split.subsplit(k=2)
+ # 50% train, 25% test, 25% validation
+ train, test, validation = split.subsplit(weighted=[2, 1, 1])
+ # Extract last 20%
+ subsplit = split.subsplit(datasets.percent[-20:])
+ ```
+
+ Warning: k and weighted will be converted into percent which means that
+ values below the percent will be rounded up or down. The final split may be
+ bigger to deal with remainders. For instance:
+
+ ```
+ train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
+ s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
+ ```
+
+ Args:
+ arg: If no kwargs are given, `arg` will be interpreted as one of
+ `k`, `percent`, or `weighted` depending on the type.
+ For example:
+ ```
+ split.subsplit(10) # Equivalent to split.subsplit(k=10)
+ split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
+ split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
+ ```
+ k: `int` If set, subdivide the split into `k` equal parts.
+ percent: `datasets.percent slice`, return a single subsplit corresponding to
+ a slice of the original split. For example:
+ `split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
+ weighted: `list[int]`, return a list of subsplits whose proportions match
+ the normalized sum of the list. For example:
+ `split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
+
+ Returns:
+ A subsplit or list of subsplits extracted from this split object.
+ """
+ # Note that the percent kwargs redefine the outer name datasets.percent. This
+ # is done for consistency (.subsplit(percent=datasets.percent[:40]))
+ if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
+ raise ValueError("Only one argument of subsplit should be set.")
+
+ # Auto deduce k
+ if isinstance(arg, int):
+ k = arg
+ elif isinstance(arg, slice):
+ percent = arg
+ elif isinstance(arg, list):
+ weighted = arg
+
+ if not (k or percent or weighted):
+ raise ValueError(
+ f"Invalid split argument {arg}. Only list, slice and int supported. "
+ "One of k, weighted or percent should be set to a non empty value."
+ )
+
+ def assert_slices_coverage(slices):
+ # Ensure that the expanded slices cover all percents.
+ assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
+
+ if k:
+ if not 0 < k <= 100:
+ raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
+ shift = 100 // k
+ slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ elif percent:
+ return _SubSplit(self, percent)
+ elif weighted:
+ # Normalize the weighted sum
+ total = sum(weighted)
+ weighted = [100 * x // total for x in weighted]
+ # Create the slice for each of the elements
+ start = 0
+ stop = 0
+ slices = []
+ for v in weighted:
+ stop += v
+ slices.append(slice(start, stop))
+ start = stop
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ else:
+ # Should not be possible
+ raise ValueError("Could not determine the split")
+
+
+# 2 requirements:
+# 1. datasets.percent be sliceable
+# 2. datasets.percent be documented
+#
+# Instances are not documented, so we want datasets.percent to be a class, but to
+# have it be sliceable, we need this metaclass.
+class PercentSliceMeta(type):
+ def __getitem__(cls, slice_value):
+ if not isinstance(slice_value, slice):
+ raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
+ return slice_value
+
+
+class PercentSlice(metaclass=PercentSliceMeta):
+ # pylint: disable=line-too-long
+ """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+ """
+
+ # pylint: enable=line-too-long
+ pass
+
+
+percent = PercentSlice # pylint: disable=invalid-name
+
+
+class _SplitMerged(SplitBase):
+ """Represent two split descriptors merged together."""
+
+ def __init__(self, split1, split2):
+ self._split1 = split1
+ self._split2 = split2
+
+ def get_read_instruction(self, split_dict):
+ read_instruction1 = self._split1.get_read_instruction(split_dict)
+ read_instruction2 = self._split2.get_read_instruction(split_dict)
+ return read_instruction1 + read_instruction2
+
+ def __repr__(self):
+ return f"({repr(self._split1)} + {repr(self._split2)})"
+
+
+class _SubSplit(SplitBase):
+ """Represent a sub split of a split descriptor."""
+
+ def __init__(self, split, slice_value):
+ self._split = split
+ self._slice_value = slice_value
+
+ def get_read_instruction(self, split_dict):
+ return self._split.get_read_instruction(split_dict)[self._slice_value]
+
+ def __repr__(self):
+ slice_str = "{start}:{stop}"
+ if self._slice_value.step is not None:
+ slice_str += ":{step}"
+ slice_str = slice_str.format(
+ start="" if self._slice_value.start is None else self._slice_value.start,
+ stop="" if self._slice_value.stop is None else self._slice_value.stop,
+ step=self._slice_value.step,
+ )
+ return f"{repr(self._split)}(datasets.percent[{slice_str}])"
+
+
+class NamedSplit(SplitBase):
+ """Descriptor corresponding to a named split (train, test, ...).
+
+ Example:
+ Each descriptor can be composed with others using addition or slicing:
+
+ ```py
+ split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
+ ```
+
+ The resulting split will correspond to 25% of the train split merged with
+ 100% of the test split.
+
+ A split cannot be added twice, so the following will fail:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TRAIN.subsplit(datasets.percent[75:])
+ ) # Error
+ split = datasets.Split.TEST + datasets.Split.ALL # Error
+ ```
+
+ The slices can be applied only one time. So the following are valid:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TEST.subsplit(datasets.percent[:50])
+ )
+ split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
+ ```
+
+ But this is not valid:
+
+ ```py
+ train = datasets.Split.TRAIN
+ test = datasets.Split.TEST
+ split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
+ split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
+ ```
+ """
+
+ def __init__(self, name):
+ self._name = name
+ split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
+ for split_name in split_names_from_instruction:
+ if not re.match(_split_re, split_name):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
+
+ def __str__(self):
+ return self._name
+
+ def __repr__(self):
+ return f"NamedSplit({self._name!r})"
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, NamedSplit):
+ return self._name == other._name # pylint: disable=protected-access
+ elif isinstance(other, SplitBase):
+ return False
+ elif isinstance(other, str): # Other should be string
+ return self._name == other
+ else:
+ raise ValueError(f"Equality not supported between split {self} and {other}")
+
+ def __lt__(self, other):
+ return self._name < other._name # pylint: disable=protected-access
+
+ def __hash__(self):
+ return hash(self._name)
+
+ def get_read_instruction(self, split_dict):
+ return SplitReadInstruction(split_dict[self._name])
+
+
+class NamedSplitAll(NamedSplit):
+ """Split corresponding to the union of all defined dataset splits."""
+
+ def __init__(self):
+ super().__init__("all")
+
+ def __repr__(self):
+ return "NamedSplitAll()"
+
+ def get_read_instruction(self, split_dict):
+ # Merge all dataset split together
+ read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
+ return sum(read_instructions, SplitReadInstruction())
+
+
+class Split:
+ # pylint: disable=line-too-long
+ """`Enum` for dataset splits.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ - `TRAIN`: the training data.
+ - `VALIDATION`: the validation data. If present, this is typically used as
+ evaluation data while iterating on a model (e.g. changing hyperparameters,
+ model architecture, etc.).
+ - `TEST`: the testing data. This is the data to report metrics on. Typically
+ you do not want to use this during model iteration as you may overfit to it.
+ - `ALL`: the union of all defined dataset splits.
+
+ All splits, including compositions, inherit from `datasets.SplitBase`.
+
+ See the [guide](../load_hub#splits) on splits for more information.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.VALIDATION,
+ ... gen_kwargs={"split_key": "validation", "files": dl_manager.download_and extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.TEST,
+ ... gen_kwargs={"split_key": "test", "files": dl_manager.download_and extract(url)},
+ ... )
+ ```
+ """
+
+ # pylint: enable=line-too-long
+ TRAIN = NamedSplit("train")
+ TEST = NamedSplit("test")
+ VALIDATION = NamedSplit("validation")
+ ALL = NamedSplitAll()
+
+ def __new__(cls, name):
+ """Create a custom split with datasets.Split('custom_name')."""
+ return NamedSplitAll() if name == "all" else NamedSplit(name)
+
+
+ # Similar to SplitInfo, but contains an additional slice info
+SlicedSplitInfo = collections.namedtuple(
+ "SlicedSplitInfo",
+ [
+ "split_info",
+ "slice_value",
+ ],
+) # noqa: E231
+
+
+class SplitReadInstruction:
+ """Object containing the reading instruction for the dataset.
+
+ Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
+ but the resolution happens instantaneously, instead of keeping track of the
+ tree, so that all instructions are compiled and flattened into a single
+ SplitReadInstruction object containing the list of files and slices to use.
+
+ Once resolved, the instructions can be accessed with:
+
+ ```
+ read_instructions.get_list_sliced_split_info() # List of splits to use
+ ```
+
+ """
+
+ def __init__(self, split_info=None):
+ self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.")
+
+ if split_info:
+ self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
+
+ def add(self, sliced_split):
+ """Add a SlicedSplitInfo the read instructions."""
+ # TODO(epot): Check that the number of examples per shard % 100 == 0
+ # Otherwise the slices value may be unbalanced and not exactly reflect the
+ # requested slice.
+ self._splits[sliced_split.split_info.name] = sliced_split
+
+ def __add__(self, other):
+ """Merging split together."""
+ # Will raise an error if a split has already been added (NonMutableDict)
+ # TODO(epot): If a split is already added but there is no overlap between
+ # the slices, should merge the slices (ex: [:10] + [80:])
+ split_instruction = SplitReadInstruction()
+ split_instruction._splits.update(self._splits) # pylint: disable=protected-access
+ split_instruction._splits.update(other._splits) # pylint: disable=protected-access
+ return split_instruction
+
+ def __getitem__(self, slice_value):
+ """Sub-splits."""
+ # Will raise an error if a split has already been sliced
+ split_instruction = SplitReadInstruction()
+ for v in self._splits.values():
+ if v.slice_value is not None:
+ raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
+ v = v._asdict()
+ v["slice_value"] = slice_value
+ split_instruction.add(SlicedSplitInfo(**v))
+ return split_instruction
+
+ def get_list_sliced_split_info(self):
+ return list(self._splits.values())
+
+
+class SplitDict(dict):
+ """Split info object."""
+
+ def __init__(self, *args, dataset_name=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.dataset_name = dataset_name
+
+ def __getitem__(self, key: Union[SplitBase, str]):
+ # 1st case: The key exists: `info.splits['train']`
+ if str(key) in self:
+ return super().__getitem__(str(key))
+ # 2nd case: Uses instructions: `info.splits['train[50%]']`
+ else:
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=self.values(),
+ instruction=key,
+ )
+ return SubSplitInfo(instructions)
+
+ def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
+ if key != value.name:
+ raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
+ super().__setitem__(key, value)
+
+ def add(self, split_info: SplitInfo):
+ """Add the split info."""
+ if split_info.name in self:
+ raise ValueError(f"Split {split_info.name} already present")
+ split_info.dataset_name = self.dataset_name
+ super().__setitem__(split_info.name, split_info)
+
+ @property
+ def total_num_examples(self):
+ """Return the total number of examples."""
+ return sum(s.num_examples for s in self.values())
+
+ @classmethod
+ def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
+ """Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
+ if isinstance(split_infos, dict):
+ split_infos = list(split_infos.values())
+
+ if dataset_name is None:
+ dataset_name = split_infos[0].get("dataset_name") if split_infos else None
+
+ split_dict = cls(dataset_name=dataset_name)
+
+ for split_info in split_infos:
+ if isinstance(split_info, dict):
+ split_info = SplitInfo(**split_info)
+ split_dict.add(split_info)
+
+ return split_dict
+
+ def to_split_dict(self):
+ """Returns a list of SplitInfo protos that we have."""
+ out = []
+ for split_name, split_info in self.items():
+ split_info = copy.deepcopy(split_info)
+ split_info.name = split_name
+ out.append(split_info)
+ return out
+
+ def copy(self):
+ return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
+
+ def _to_yaml_list(self) -> list:
+ out = [asdict(s) for s in self.to_split_dict()]
+ # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
+ for split_info_dict in out:
+ split_info_dict.pop("shard_lengths", None)
+ # we don't need the dataset_name attribute that is deprecated
+ for split_info_dict in out:
+ split_info_dict.pop("dataset_name", None)
+ return out
+
+ @classmethod
+ def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
+ return cls.from_split_dict(yaml_data)
+
+
+@dataclass
+class SplitGenerator:
+ """Defines the split information for the generator.
+
+ This should be used as returned value of
+ `GeneratorBasedBuilder._split_generators`.
+ See `GeneratorBasedBuilder._split_generators` for more info and example
+ of usage.
+
+ Args:
+ name (`str`):
+ Name of the `Split` for which the generator will
+ create the examples.
+ **gen_kwargs (additional keyword arguments):
+ Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
+ of the builder.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
+ ... )
+ ```
+ """
+
+ name: str
+ gen_kwargs: Dict = dataclasses.field(default_factory=dict)
+ split_info: SplitInfo = dataclasses.field(init=False)
+
+ def __post_init__(self):
+ self.name = str(self.name) # Make sure we convert NamedSplits in strings
+ NamedSplit(self.name) # check that it's a valid split name
+ self.split_info = SplitInfo(name=self.name)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/streaming.py b/env-llmeval/lib/python3.10/site-packages/datasets/streaming.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9e7e185a95bd4a4343e231f1ce150f0d4d8372c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/streaming.py
@@ -0,0 +1,140 @@
+import importlib
+import inspect
+from functools import wraps
+from typing import TYPE_CHECKING, Optional
+
+from .download.download_config import DownloadConfig
+from .download.streaming_download_manager import (
+ xbasename,
+ xdirname,
+ xet_parse,
+ xexists,
+ xgetsize,
+ xglob,
+ xgzip_open,
+ xisdir,
+ xisfile,
+ xjoin,
+ xlistdir,
+ xnumpy_load,
+ xopen,
+ xpandas_read_csv,
+ xpandas_read_excel,
+ xPath,
+ xpyarrow_parquet_read_table,
+ xrelpath,
+ xsio_loadmat,
+ xsplit,
+ xsplitext,
+ xwalk,
+ xxml_dom_minidom_parse,
+)
+from .utils.logging import get_logger
+from .utils.patching import patch_submodule
+from .utils.py_utils import get_imports
+
+
+logger = get_logger(__name__)
+
+
+if TYPE_CHECKING:
+ from .builder import DatasetBuilder
+
+
+def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
+ """Extend the module to support streaming.
+
+ We patch some functions in the module to use `fsspec` to support data streaming:
+ - We use `fsspec.open` to open and read remote files. We patch the module function:
+ - `open`
+ - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
+ functions:
+ - `os.path.join`
+ - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
+
+ The patched functions are replaced with custom functions defined to work with the
+ :class:`~download.streaming_download_manager.StreamingDownloadManager`.
+
+ Args:
+ module_path: Path to the module to be extended.
+ download_config: mainly uses `use_auth_token` or `storage_options` to support different platforms and auth types.
+ """
+
+ module = importlib.import_module(module_path)
+
+ # TODO(QL): always update the module to add subsequent new authentication without removing old ones
+ if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
+ if isinstance(module._patched_for_streaming, DownloadConfig):
+ module._patched_for_streaming.token = download_config.token
+ module._patched_for_streaming.storage_options = download_config.storage_options
+ return
+
+ def wrap_auth(function):
+ @wraps(function)
+ def wrapper(*args, **kwargs):
+ return function(*args, download_config=download_config, **kwargs)
+
+ wrapper._decorator_name_ = "wrap_auth"
+ return wrapper
+
+ # open files in a streaming fashion
+ patch_submodule(module, "open", wrap_auth(xopen)).start()
+ patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
+ patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
+ patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
+ # allow to navigate in remote zip files
+ patch_submodule(module, "os.path.join", xjoin).start()
+ patch_submodule(module, "os.path.dirname", xdirname).start()
+ patch_submodule(module, "os.path.basename", xbasename).start()
+ patch_submodule(module, "os.path.relpath", xrelpath).start()
+ patch_submodule(module, "os.path.split", xsplit).start()
+ patch_submodule(module, "os.path.splitext", xsplitext).start()
+ # allow checks on paths
+ patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
+ patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
+ patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
+ patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
+ patch_submodule(module, "pathlib.Path", xPath).start()
+ # file readers
+ patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
+ patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
+ patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
+ patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
+ patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
+ patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
+ patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
+ # pyarrow: do not patch pyarrow attribute in packaged modules
+ if not module.__name__.startswith("datasets.packaged_modules."):
+ patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
+ module._patched_for_streaming = download_config
+
+
+def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
+ """Extend the dataset builder module and the modules imported by it to support streaming.
+
+ Args:
+ builder (:class:`DatasetBuilder`): Dataset builder instance.
+ """
+ # this extends the open and os.path.join functions for data streaming
+ download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
+ extend_module_for_streaming(builder.__module__, download_config=download_config)
+ # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
+ if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
+ for imports in get_imports(inspect.getfile(builder.__class__)):
+ if imports[0] == "internal":
+ internal_import_name = imports[1]
+ internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
+ extend_module_for_streaming(internal_module_name, download_config=download_config)
+
+ # builders can inherit from other builders that might use streaming functionality
+ # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
+ # but these parents builders are not patched automatically as they are not instantiated, so we patch them here
+ from .builder import DatasetBuilder
+
+ parent_builder_modules = [
+ cls.__module__
+ for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
+ if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
+ ] # check it's not a standard builder from datasets.builder
+ for module in parent_builder_modules:
+ extend_module_for_streaming(module, download_config=download_config)
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/table.py b/env-llmeval/lib/python3.10/site-packages/datasets/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..43aa228278f96deb09b162e17a38e07472c0fa9d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/table.py
@@ -0,0 +1,2360 @@
+import copy
+import os
+from functools import partial
+from itertools import groupby
+from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+
+from . import config
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+ from .features.features import Features, FeatureType
+
+
+logger = get_logger(__name__)
+
+
+def inject_arrow_table_documentation(arrow_table_method):
+ def wrapper(fn):
+ fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
+ fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
+ if hasattr(arrow_table_method, "__annotations__"):
+ fn.__annotations__ = arrow_table_method.__annotations__
+ return fn
+
+ return wrapper
+
+
+def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
+ in_memory_stream = pa.input_stream(filename)
+ opened_stream = pa.ipc.open_stream(in_memory_stream)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
+ stream = pa.BufferReader(buffer)
+ opened_stream = pa.ipc.open_stream(stream)
+ table = opened_stream.read_all()
+ return table
+
+
+def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
+ memory_mapped_stream = pa.memory_map(filename)
+ return pa.ipc.open_stream(memory_mapped_stream)
+
+
+def read_schema_from_file(filename: str) -> pa.Schema:
+ """
+ Infer the arrow table schema from a file without loading the whole file into memory.
+ Useful especially for very big files.
+ """
+ with pa.memory_map(filename) as memory_mapped_stream:
+ schema = pa.ipc.open_stream(memory_mapped_stream).schema
+ return schema
+
+
+def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
+ opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _deepcopy(x, memo: dict):
+ """deepcopy a regular class instance"""
+ cls = x.__class__
+ result = cls.__new__(cls)
+ memo[id(x)] = result
+ for k, v in x.__dict__.items():
+ setattr(result, k, copy.deepcopy(v, memo))
+ return result
+
+
+def _interpolation_search(arr: List[int], x: int) -> int:
+ """
+ Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
+
+ Args:
+ arr (`List[int]`): non-empty sorted list of integers
+ x (`int`): query
+
+ Returns:
+ `int`: the position i so that arr[i] <= x < arr[i+1]
+
+ Raises:
+ `IndexError`: if the array is empty or if the query is outside the array values
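+
+ Example (worked illustration; `arr[1] <= 7 < arr[2]`):
+ >>> _interpolation_search([0, 5, 8, 12], 7)
+ 1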
+ """
+ i, j = 0, len(arr) - 1
+ while i < j and arr[i] <= x < arr[j]:
+ k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
+ if arr[k] <= x < arr[k + 1]:
+ return k
+ elif arr[k] < x:
+ i, j = k + 1, j
+ else:
+ i, j = i, k
+ raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
+
+
+class IndexedTableMixin:
+ def __init__(self, table: pa.Table):
+ self._schema: pa.Schema = table.schema
+ self._batches: List[pa.RecordBatch] = [
+ recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
+ ]
+ self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
+
+ def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
+ """
+ Create a pa.Table by gathering the records at the specified indices. Should be faster
+ than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
+ the binary searches in parallel, in highly optimized C code.
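+
+ Example (illustrative sketch; `table` stands for any instance of this mixin, e.g. a `datasets.table.InMemoryTable` with at least 4 rows):
+
+ ```py
+ >>> subset = table.fast_gather([0, 2, 3])
+ >>> subset.num_rows
+ 3
+ ```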
+ """
+ if not len(indices):
+ raise ValueError("Indices must be non-empty")
+ batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
+ return pa.Table.from_batches(
+ [
+ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
+ for batch_idx, i in zip(batch_indices, indices)
+ ],
+ schema=self._schema,
+ )
+
+ def fast_slice(self, offset=0, length=None) -> pa.Table:
+ """
+ Slice the Table using interpolation search.
+ The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
+
+ Interpolation search is used to find the start and end indexes of the batches we want to keep.
+ The batches to keep are then concatenated to form the sliced Table.
+ """
+ if offset < 0:
+ raise IndexError("Offset must be non-negative")
+ elif offset >= self._offsets[-1] or (length is not None and length <= 0):
+ return pa.Table.from_batches([], schema=self._schema)
+ i = _interpolation_search(self._offsets, offset)
+ if length is None or length + offset >= self._offsets[-1]:
+ batches = self._batches[i:]
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ else:
+ j = _interpolation_search(self._offsets, offset + length - 1)
+ batches = self._batches[i : j + 1]
+ batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ return pa.Table.from_batches(batches, schema=self._schema)
+
+
+class Table(IndexedTableMixin):
+ """
+ Wraps a pyarrow Table by using composition.
+ This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
+
+ It implements all the basic attributes/methods of the pyarrow Table class except
+ the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
+ append_column, remove_column, set_column, rename_columns` and `drop`.
+
+ The implementation of these methods differs for the subclasses.
+ """
+
+ def __init__(self, table: pa.Table):
+ super().__init__(table)
+ self.table = table
+
+ def __deepcopy__(self, memo: dict):
+ # arrow tables are immutable, so there's no need to copy self.table
+ # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
+ # by adding it to the memo, self.table won't be copied
+ memo[id(self.table)] = self.table
+ # same for the recordbatches used by the index
+ memo[id(self._batches)] = list(self._batches)
+ return _deepcopy(self, memo)
+
+ def validate(self, *args, **kwargs):
+ """
+ Perform validation checks. An exception is raised if validation fails.
+
+ By default only cheap validation checks are run. Pass `full=True`
+ for thorough validation checks (potentially `O(n)`).
+
+ Args:
+ full (`bool`, defaults to `False`):
+ If `True`, run expensive checks, otherwise cheap checks only.
+
+ Raises:
+ `pa.lib.ArrowInvalid`: if validation fails
+ """
+ return self.table.validate(*args, **kwargs)
+
+ def equals(self, *args, **kwargs):
+ """
+ Check if contents of two tables are equal.
+
+ Args:
+ other ([`~datasets.table.Table`]):
+ Table to compare against.
+ check_metadata (`bool`, defaults to `False`):
+ Whether schema metadata equality should be checked as well.
+
+ Returns:
+ `bool`
+ """
+ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
+ kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
+ return self.table.equals(*args, **kwargs)
+
+ def to_batches(self, *args, **kwargs):
+ """
+ Convert Table to list of (contiguous) `RecordBatch` objects.
+
+ Args:
+ max_chunksize (`int`, defaults to `None`):
+ Maximum size for `RecordBatch` chunks. Individual chunks may be
+ smaller depending on the chunk layout of individual columns.
+
+ Returns:
+ `List[pyarrow.RecordBatch]`
+ """
+ return self.table.to_batches(*args, **kwargs)
+
+ def to_pydict(self, *args, **kwargs):
+ """
+ Convert the Table to a `dict` or `OrderedDict`.
+
+ Returns:
+ `dict`
+ """
+ return self.table.to_pydict(*args, **kwargs)
+
+ def to_pylist(self, *args, **kwargs):
+ """
+ Convert the Table to a list
+
+ Returns:
+ `list`
+ """
+ return self.table.to_pylist(*args, **kwargs)
+
+ def to_pandas(self, *args, **kwargs):
+ """
+ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ Arrow MemoryPool to use for allocations. Uses the default memory
+ pool if not passed.
+ strings_to_categorical (`bool`, defaults to `False`):
+ Encode string (UTF8) and binary types to `pandas.Categorical`.
+ categories (`list`, defaults to `empty`):
+ List of fields that should be returned as `pandas.Categorical`. Only
+ applies to table-like data structures.
+ zero_copy_only (`bool`, defaults to `False`):
+ Raise an `ArrowException` if this function call would require copying
+ the underlying data.
+ integer_object_nulls (`bool`, defaults to `False`):
+ Cast integers with nulls to objects.
+ date_as_object (`bool`, defaults to `True`):
+ Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
+ timestamp_as_object (`bool`, defaults to `False`):
+ Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
+ useful if you have timestamps that don't fit in the normal date
+ range of nanosecond timestamps (1678 CE-2262 CE).
+ If `False`, all timestamps are converted to `datetime64[ns]` dtype.
+ use_threads (`bool`, defaults to `True`):
+ Whether to parallelize the conversion using multiple threads.
+ deduplicate_objects (`bool`, defaults to `False`):
+ Do not create multiple copies of Python objects when converting, to save
+ on memory use. Conversion will be slower.
+ ignore_metadata (`bool`, defaults to `False`):
+ If `True`, do not use the 'pandas' metadata to reconstruct the
+ DataFrame index, if present.
+ safe (`bool`, defaults to `True`):
+ For certain data types, a cast is needed in order to store the
+ data in a pandas DataFrame or Series (e.g. timestamps are always
+ stored as nanoseconds in pandas). This option controls whether it
+ is a safe cast or not.
+ split_blocks (`bool`, defaults to `False`):
+ If `True`, generate one internal "block" for each column when
+ creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
+ can temporarily reduce memory use, note that various pandas operations
+ can trigger "consolidation" which may balloon memory use.
+ self_destruct (`bool`, defaults to `False`):
+ EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
+ memory while converting the Arrow object to pandas. If you use the
+ object after calling `to_pandas` with this option it will crash your
+ program.
+ types_mapper (`function`, defaults to `None`):
+ A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
+ This can be used to override the default pandas type for conversion
+ of built-in pyarrow types or in absence of `pandas_metadata` in the
+ Table schema. The function receives a pyarrow DataType and is
+ expected to return a pandas `ExtensionDtype` or `None` if the
+ default conversion should be used for that type. If you have
+ a dictionary mapping, you can pass `dict.get` as function.
+
+ Returns:
+ `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
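+
+        Example (a minimal sketch; the column names and values are arbitrary):
+        ```python
+        >>> import pyarrow as pa
+        >>> from datasets.table import InMemoryTable
+        >>> table = InMemoryTable(pa.table({"text": ["foo", "bar"], "label": [0, 1]}))
+        >>> df = table.to_pandas()
+        >>> df.shape
+        (2, 2)
+        >>> list(df.columns)
+        ['text', 'label']
+        ```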
+ """
+ return self.table.to_pandas(*args, **kwargs)
+
+ def to_string(self, *args, **kwargs):
+ return self.table.to_string(*args, **kwargs)
+
+ def to_reader(self, max_chunksize: Optional[int] = None):
+ """
+ Convert the Table to a RecordBatchReader.
+
+        Note that this method is zero-copy: it merely exposes the same data under a different API.
+
+        Args:
+            max_chunksize (`int`, defaults to `None`):
+ Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
+ on the chunk layout of individual columns.
+
+ Returns:
+ `pyarrow.RecordBatchReader`
+ """
+ return self.table.to_reader(max_chunksize=max_chunksize)
+
+ def field(self, *args, **kwargs):
+ """
+ Select a schema field by its column name or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the field to retrieve.
+
+ Returns:
+ `pyarrow.Field`
+ """
+ return self.table.field(*args, **kwargs)
+
+ def column(self, *args, **kwargs):
+ """
+        Select a column by its column name or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the column to retrieve.
+
+ Returns:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.column(*args, **kwargs)
+
+ def itercolumns(self, *args, **kwargs):
+ """
+ Iterator over all columns in their numerical order.
+
+ Yields:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.itercolumns(*args, **kwargs)
+
+ @property
+ def schema(self):
+ """
+ Schema of the table and its columns.
+
+ Returns:
+ `pyarrow.Schema`
+ """
+ return self.table.schema
+
+ @property
+ def columns(self):
+ """
+ List of all columns in numerical order.
+
+ Returns:
+ `List[pa.ChunkedArray]`
+ """
+ return self.table.columns
+
+ @property
+ def num_columns(self):
+ """
+ Number of columns in this table.
+
+ Returns:
+ int
+ """
+ return self.table.num_columns
+
+ @property
+ def num_rows(self):
+ """
+ Number of rows in this table.
+
+ Due to the definition of a table, all columns have the same number of
+ rows.
+
+ Returns:
+ int
+ """
+ return self.table.num_rows
+
+ @property
+ def shape(self):
+ """
+ Dimensions of the table: (#rows, #columns).
+
+ Returns:
+ `(int, int)`: Number of rows and number of columns.
+ """
+ return self.table.shape
+
+ @property
+ def nbytes(self):
+ """
+ Total number of bytes consumed by the elements of the table.
+ """
+ return self.table.nbytes
+
+ @property
+ def column_names(self):
+ """
+ Names of the table's columns.
+ """
+ return self.table.column_names
+
+ def __eq__(self, other):
+ return self.equals(other)
+
+ def __getitem__(self, i):
+ return self.table[i]
+
+ def __len__(self):
+ return len(self.table)
+
+ def __repr__(self):
+ return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def __str__(self):
+ return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def slice(self, *args, **kwargs):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ raise NotImplementedError()
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be None,
+        which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ raise NotImplementedError()
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`: New table without the column.
+ """
+ raise NotImplementedError()
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ raise NotImplementedError()
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`: New table without the columns.
+ """
+ raise NotImplementedError()
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ `datasets.table.Table`: table with only a subset of the columns
+ """
+ raise NotImplementedError()
+
+
+class TableBlock(Table):
+ """
+    `TableBlock` is the allowed class inside a `ConcatenationTable`.
+    Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`.
+    This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTables`.
+ """
+
+ pass
+
+
+class InMemoryTable(TableBlock):
+ """
+    The table is said to be in-memory when it is loaded into the user's RAM.
+
+    Pickling it copies all the data in memory.
+    Its implementation is simple and uses the underlying pyarrow Table methods directly.
+
+    This is different from the `MemoryMappedTable`, for which pickling doesn't copy all the
+    data into memory. For a `MemoryMappedTable`, unpickling instead reloads the table from the disk.
+
+    `InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved for
+    data bigger than memory or for when you want the memory footprint of your application to
+    stay low.
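+
+    Example (a minimal sketch; the pickle round-trip below serializes the data itself,
+    since there is no on-disk file to reload it from):
+    ```python
+    >>> import pickle
+    >>> import pyarrow as pa
+    >>> from datasets.table import InMemoryTable
+    >>> table = InMemoryTable(pa.table({"a": [1, 2, 3]}))
+    >>> pickle.loads(pickle.dumps(table)).to_pydict()
+    {'a': [1, 2, 3]}
+    ```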
+ """
+
+ @classmethod
+ def from_file(cls, filename: str):
+ table = _in_memory_arrow_table_from_file(filename)
+ return cls(table)
+
+ @classmethod
+ def from_buffer(cls, buffer: pa.Buffer):
+ table = _in_memory_arrow_table_from_buffer(buffer)
+ return cls(table)
+
+ @classmethod
+ def from_pandas(cls, *args, **kwargs):
+ """
+ Convert pandas.DataFrame to an Arrow Table.
+
+ The column types in the resulting Arrow Table are inferred from the
+ dtypes of the pandas.Series in the DataFrame. In the case of non-object
+ Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the
+ Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough
+ information to always lead to a meaningful Arrow type. In the case that
+ we cannot infer a type, e.g. because the DataFrame is of length 0 or
+ the Series only contains `None/nan` objects, the type is set to
+ null. This behavior can be avoided by constructing an explicit schema
+ and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ schema (`pyarrow.Schema`, *optional*):
+ The expected schema of the Arrow Table. This can be used to
+ indicate the type of columns if we cannot infer it automatically.
+ If passed, the output will have exactly this schema. Columns
+ specified in the schema that are not found in the DataFrame columns
+ or its index will raise an error. Additional columns or index
+ levels in the DataFrame which are not specified in the schema will
+ be ignored.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting
+ `Table`. The default of None will store the index as a column,
+ except for RangeIndex which is stored as metadata only. Use
+ `preserve_index=True` to force it to be stored as a column.
+            nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
+ If greater than 1, convert columns to Arrow in parallel using
+ indicated number of threads.
+ columns (`List[str]`, *optional*):
+                List of columns to be converted. If `None`, use all columns.
+ safe (`bool`, defaults to `True`):
+                Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`:
+
+ Examples:
+ ```python
+ >>> import pandas as pd
+ >>> import pyarrow as pa
+ >>> df = pd.DataFrame({
+ ... 'int': [1, 2],
+ ... 'str': ['a', 'b']
+ ... })
+        >>> InMemoryTable.from_pandas(df)
+
+ ```
+ """
+ return cls(pa.Table.from_pandas(*args, **kwargs))
+
+ @classmethod
+ def from_arrays(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays.
+
+ Args:
+ arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
+ Equal-length arrays that should form the table.
+ names (`List[str]`, *optional*):
+ Names for the table columns. If not passed, schema must be passed.
+ schema (`Schema`, defaults to `None`):
+ Schema for the created table. If not passed, names must be passed.
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_arrays(*args, **kwargs))
+
+ @classmethod
+ def from_pydict(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays or columns.
+
+ Args:
+ mapping (`Union[dict, Mapping]`):
+ A mapping of strings to Arrays or Python lists.
+ schema (`Schema`, defaults to `None`):
+                If not passed, will be inferred from the Mapping values.
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
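+
+        Example (a minimal sketch; the mapping used here is arbitrary):
+        ```python
+        >>> from datasets.table import InMemoryTable
+        >>> table = InMemoryTable.from_pydict({"ids": [0, 1], "texts": ["foo", "bar"]})
+        >>> table.column_names
+        ['ids', 'texts']
+        ```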
+ """
+ return cls(pa.Table.from_pydict(*args, **kwargs))
+
+ @classmethod
+ def from_pylist(cls, mapping, *args, **kwargs):
+ """
+ Construct a Table from list of rows / dictionaries.
+
+ Args:
+ mapping (`List[dict]`):
+                A list of dicts, each mapping column names to row values.
+ schema (`Schema`, defaults to `None`):
+                If not passed, will be inferred from the Mapping values.
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_pylist(mapping, *args, **kwargs))
+
+ @classmethod
+ def from_batches(cls, *args, **kwargs):
+ """
+ Construct a Table from a sequence or iterator of Arrow `RecordBatches`.
+
+ Args:
+ batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
+ Sequence of `RecordBatch` to be converted, all schemas must be equal.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the first `RecordBatch`.
+
+ Returns:
+ `datasets.table.Table`:
+ """
+ return cls(pa.Table.from_batches(*args, **kwargs))
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
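+
+        Example (illustrative only; the table content is arbitrary):
+        ```python
+        >>> from datasets.table import InMemoryTable
+        >>> table = InMemoryTable.from_pydict({"a": [10, 20, 30, 40]})
+        >>> table.slice(1, 2).to_pydict()
+        {'a': [20, 30]}
+        ```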
+ """
+ # Use fast slicing here
+ return InMemoryTable(self.fast_slice(offset=offset, length=length))
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ return InMemoryTable(self.table.filter(*args, **kwargs))
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_flatten(self.table, *args, **kwargs))
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(self.table.combine_chunks(*args, **kwargs))
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_cast(self.table, *args, **kwargs))
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ return InMemoryTable(self.table.add_column(*args, **kwargs))
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ return InMemoryTable(self.table.append_column(*args, **kwargs))
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ return InMemoryTable(self.table.remove_column(*args, **kwargs))
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ return InMemoryTable(self.table.set_column(*args, **kwargs))
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ return InMemoryTable(self.table.rename_columns(*args, **kwargs))
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ return InMemoryTable(self.table.drop(*args, **kwargs))
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ return InMemoryTable(self.table.select(*args, **kwargs))
+
+
+# The MemoryMappedTable needs replays to properly reload tables from the disk
+Replay = Tuple[str, tuple, dict]
+
+
+class MemoryMappedTable(TableBlock):
+ """
+    The table is said to be memory-mapped when it doesn't use the user's RAM but loads the data
+    from the disk instead.
+
+    Pickling it doesn't copy the data into memory.
+    Instead, only the path to the memory mapped arrow file is pickled, as well as the list
+    of transforms to "replay" when reloading the table from the disk.
+
+    Its implementation requires storing a history of all the transforms that were applied
+    to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
+    from the disk.
+
+    This is different from the `InMemoryTable`, for which pickling does copy all the
+    data into memory.
+
+    `InMemoryTable` must be used when data fit in memory, while `MemoryMappedTable` is reserved for
+    data bigger than memory or for when you want the memory footprint of your application to
+    stay low.
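+
+    Example (a minimal sketch; `data.arrow` is a hypothetical path, and the file is assumed to be
+    written in the Arrow streaming IPC format, which is the format the memory mapping expects):
+    ```python
+    >>> import pyarrow as pa
+    >>> from datasets.table import MemoryMappedTable
+    >>> # write a small table to a hypothetical file in the Arrow streaming format
+    >>> with pa.OSFile("data.arrow", "wb") as sink:
+    ...     with pa.ipc.new_stream(sink, pa.schema([("a", pa.int64())])) as writer:
+    ...         writer.write_table(pa.table({"a": [1, 2, 3]}))
+    >>> table = MemoryMappedTable.from_file("data.arrow")
+    >>> table.num_rows
+    3
+    ```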
+ """
+
+ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
+ super().__init__(table)
+ self.path = os.path.abspath(path)
+ self.replays: List[Replay] = replays if replays is not None else []
+
+ @classmethod
+ def from_file(cls, filename: str, replays=None):
+ table = _memory_mapped_arrow_table_from_file(filename)
+ table = cls._apply_replays(table, replays)
+ return cls(table, filename, replays)
+
+ def __getstate__(self):
+ return {"path": self.path, "replays": self.replays}
+
+ def __setstate__(self, state):
+ path = state["path"]
+ replays = state["replays"]
+ table = _memory_mapped_arrow_table_from_file(path)
+ table = self._apply_replays(table, replays)
+ MemoryMappedTable.__init__(self, table, path=path, replays=replays)
+
+ @staticmethod
+ def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
+ if replays is not None:
+ for name, args, kwargs in replays:
+ if name == "cast":
+ table = table_cast(table, *args, **kwargs)
+ elif name == "flatten":
+ table = table_flatten(table, *args, **kwargs)
+ else:
+ table = getattr(table, name)(*args, **kwargs)
+ return table
+
+ def _append_replay(self, replay: Replay) -> List[Replay]:
+ replays = copy.deepcopy(self.replays)
+ replays.append(replay)
+ return replays
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("slice", (offset, length), {})
+ replays = self._append_replay(replay)
+ # Use fast slicing here
+ return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the ChunkedArray of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)
+
+ def cast(self, *args, **kwargs):
+ """
+        Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be None,
+        which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)
+
+
+# A ConcatenationTable is the concatenation of several tables.
+# The ``blocks`` attribute stores a list of lists of blocks.
+# The first axis concatenates the tables along the axis 0 (it appends rows),
+# while the second axis concatenates tables along the axis 1 (it appends columns).
+TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])
+
+
+class ConcatenationTable(Table):
+ """
+ The table comes from the concatenation of several tables called blocks.
+ It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).
+
+ The underlying tables are called "blocks" and can be either `InMemoryTable`
+ or `MemoryMappedTable` objects.
+    This makes it possible to combine tables that come from memory or that are memory mapped.
+    When a `ConcatenationTable` is pickled, each block is pickled:
+    - the `InMemoryTable` objects are pickled by copying all the data in memory.
+    - the `MemoryMappedTable` objects are pickled without copying the data into memory.
+    Instead, only the path to the memory mapped arrow file is pickled, as well as the list
+    of transforms to "replay" when reloading the table from the disk.
+
+    Its implementation requires storing each block separately.
+    The `blocks` attribute stores a list of lists of blocks.
+ The first axis concatenates the tables along the axis 0 (it appends rows),
+ while the second axis concatenates tables along the axis 1 (it appends columns).
+
+ If some columns are missing when concatenating on axis 0, they are filled with null values.
+    This is done using `pyarrow.concat_tables(tables, promote=True)` (or `promote_options="default"` on pyarrow 14+).
+
+ You can access the fully combined table by accessing the `ConcatenationTable.table` attribute,
+ and the blocks by accessing the `ConcatenationTable.blocks` attribute.
+ """
+
+ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
+ super().__init__(table)
+ self.blocks = blocks
+ # Check that all the blocks have the right type.
+ # Only InMemoryTable and MemoryMappedTable are allowed.
+ for subtables in blocks:
+ for subtable in subtables:
+ if not isinstance(subtable, TableBlock):
+ raise TypeError(
+ "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
+ f", but got {subtable}."
+ )
+
+ def __getstate__(self):
+ return {"blocks": self.blocks}
+
+ def __setstate__(self, state):
+ blocks = state["blocks"]
+ table = self._concat_blocks_horizontally_and_vertically(blocks)
+ ConcatenationTable.__init__(self, table, blocks=blocks)
+
+ @staticmethod
+ def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
+ pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
+ if axis == 0:
+ # we set promote=True to fill missing columns with null values
+ if config.PYARROW_VERSION.major < 14:
+ return pa.concat_tables(pa_tables, promote=True)
+ else:
+ return pa.concat_tables(pa_tables, promote_options="default")
+ elif axis == 1:
+ for i, table in enumerate(pa_tables):
+ if i == 0:
+ pa_table = table
+ else:
+ for name, col in zip(table.column_names, table.columns):
+ pa_table = pa_table.append_column(name, col)
+ return pa_table
+ else:
+ raise ValueError("'axis' must be either 0 or 1")
+
+ @classmethod
+ def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
+ pa_tables_to_concat_vertically = []
+ for i, tables in enumerate(blocks):
+ if not tables:
+ continue
+ pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
+ pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
+ return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)
+
+ @classmethod
+ def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
+ if axis is not None:
+ merged_blocks = []
+ for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
+ if is_in_memory:
+ block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
+ merged_blocks += list(block_group)
+ else: # both
+ merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
+ if all(len(row_block) == 1 for row_block in merged_blocks):
+ merged_blocks = cls._merge_blocks(
+ [block for row_block in merged_blocks for block in row_block], axis=0
+ )
+ return merged_blocks
+
+ @classmethod
+ def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
+ if isinstance(blocks, TableBlock):
+ return blocks
+ elif isinstance(blocks[0], TableBlock):
+ return cls._merge_blocks(blocks, axis=0)
+ else:
+ return cls._merge_blocks(blocks)
+
+ @classmethod
+ def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
+ blocks = cls._consolidate_blocks(blocks)
+ if isinstance(blocks, TableBlock):
+ table = blocks
+ return cls(table.table, [[table]])
+ elif isinstance(blocks[0], TableBlock):
+ table = cls._concat_blocks(blocks, axis=0)
+ blocks = [[t] for t in blocks]
+ return cls(table, blocks)
+ else:
+ table = cls._concat_blocks_horizontally_and_vertically(blocks)
+ return cls(table, blocks)
+
+ @classmethod
+ def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
+ """Create `ConcatenationTable` from list of tables.
+
+ Args:
+ tables (list of `Table` or list of `pyarrow.Table`):
+ List of tables.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
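+        Example (a minimal sketch; the two in-memory tables are arbitrary):
+        ```python
+        >>> import pyarrow as pa
+        >>> from datasets.table import ConcatenationTable, InMemoryTable
+        >>> t1 = InMemoryTable(pa.table({"a": [1, 2]}))
+        >>> t2 = InMemoryTable(pa.table({"a": [3]}))
+        >>> ConcatenationTable.from_tables([t1, t2], axis=0).to_pydict()
+        {'a': [1, 2, 3]}
+        ```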
+ """
+
+ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
+ if isinstance(table, pa.Table):
+ return [[InMemoryTable(table)]]
+ elif isinstance(table, ConcatenationTable):
+ return copy.deepcopy(table.blocks)
+ else:
+ return [[table]]
+
+ def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
+ sliced = [table.slice(0, length) for table in row_block]
+ remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
+ return sliced, remainder
+
+ def _split_both_like(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]]
+ ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
+ """
+            Make sure each row_block contains the same num_rows to be able to concatenate them on axis=1.
+
+            To do so, we modify both block sets to have the same row_block boundaries.
+ For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
+ we modify both to have 4 row_blocks of size 2, 1, 1 and 2:
+
+ [ x x x | x x x ]
+ + [ y y | y y | y y ]
+ -----------------------------
+ = [ x x | x | x | x x ]
+ [ y y | y | y | y y ]
+
+ """
+ result, blocks = list(result), list(blocks)
+ new_result, new_blocks = [], []
+ while result and blocks:
+                # we slice the longest row block so that both sides get row blocks of the same length
+ # and we replace the long row block by its remainder if necessary
+ if len(result[0][0]) > len(blocks[0][0]):
+ new_blocks.append(blocks[0])
+ sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
+ new_result.append(sliced)
+ elif len(result[0][0]) < len(blocks[0][0]):
+ new_result.append(result[0])
+ sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
+ new_blocks.append(sliced)
+ else:
+ new_result.append(result.pop(0))
+ new_blocks.append(blocks.pop(0))
+ if result or blocks:
+ raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
+ return new_result, new_blocks
+
+ def _extend_blocks(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
+ ) -> List[List[TableBlock]]:
+ if axis == 0:
+ result.extend(blocks)
+ elif axis == 1:
+                # We make sure each row_block has the same num_rows
+ result, blocks = _split_both_like(result, blocks)
+ for i, row_block in enumerate(blocks):
+ result[i].extend(row_block)
+ return result
+
+ blocks = to_blocks(tables[0])
+ for table in tables[1:]:
+ table_blocks = to_blocks(table)
+ blocks = _extend_blocks(blocks, table_blocks, axis=axis)
+ return cls.from_blocks(blocks)
+
+ @property
+ def _slices(self):
+ offset = 0
+ for tables in self.blocks:
+ length = len(tables[0])
+ yield (offset, length)
+ offset += length
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.slice(offset, length=length)
+ length = length if length is not None else self.num_rows - offset
+ blocks = []
+ for tables in self.blocks:
+ n_rows = len(tables[0])
+ if length == 0:
+ break
+ elif n_rows <= offset:
+ offset = offset - n_rows
+ elif n_rows <= offset + length:
+ blocks.append([t.slice(offset) for t in tables])
+ length, offset = length + offset - n_rows, 0
+ else:
+ blocks.append([t.slice(offset, length) for t in tables])
+ length, offset = 0, 0
+ return ConcatenationTable(table, blocks)
+
+ def filter(self, mask, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ table = self.table.filter(mask, *args, **kwargs)
+ blocks = []
+ for (offset, length), tables in zip(self._slices, self.blocks):
+ submask = mask.slice(offset, length)
+ blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = table_flatten(self.table, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.flatten(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.combine_chunks(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def cast(self, target_schema, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ from .features import Features
+
+ table = table_cast(self.table, target_schema, *args, **kwargs)
+ target_features = Features.from_arrow_schema(target_schema)
+ blocks = []
+ for subtables in self.blocks:
+ new_tables = []
+ fields = list(target_schema)
+ for subtable in subtables:
+ subfields = []
+ for name in subtable.column_names:
+ subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
+ subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
+ subschema = subfeatures.arrow_schema
+ new_tables.append(subtable.cast(subschema, *args, **kwargs))
+ blocks.append(new_tables)
+ return ConcatenationTable(table, blocks)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ table = self.table.replace_schema_metadata(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
+        return ConcatenationTable(table, blocks)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, i, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ table = self.table.remove_column(i, *args, **kwargs)
+ name = self.table.column_names[i]
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [
+ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
+ for t in tables
+ ]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, names, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ table = self.table.rename_columns(names, *args, **kwargs)
+ names = dict(zip(self.table.column_names, names))
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def drop(self, columns, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+            `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ table = self.table.drop(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def select(self, columns, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ table = self.table.select(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+
+def concat_tables(tables: List[Table], axis: int = 0) -> Table:
+ """
+ Concatenate tables.
+
+ Args:
+ tables (list of `Table`):
+ List of tables to be concatenated.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+ Returns:
+ `datasets.table.Table`:
+ If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
+ Otherwise if there's only one table, it is returned as is.
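+
+    Example (a minimal sketch; the two in-memory tables are arbitrary):
+    ```python
+    >>> import pyarrow as pa
+    >>> from datasets.table import InMemoryTable, concat_tables
+    >>> t1 = InMemoryTable(pa.table({"a": [1, 2]}))
+    >>> t2 = InMemoryTable(pa.table({"a": [3, 4]}))
+    >>> concat_tables([t1, t2]).to_pydict()
+    {'a': [1, 2, 3, 4]}
+    ```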
+ """
+ tables = list(tables)
+ if len(tables) == 1:
+ return tables[0]
+ return ConcatenationTable.from_tables(tables, axis=axis)
+
+
+def list_table_cache_files(table: Table) -> List[str]:
+ """
+ Get the cache files that are loaded by the table.
+    Cache files are used when parts of the table come from the disk via memory mapping.
+
+    Args:
+        table (`Table`):
+            The table whose cache files to list.
+
+ Returns:
+ `List[str]`:
+ A list of paths to the cache files loaded by the table.
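+
+    Example (a minimal sketch; a purely in-memory table has no cache files):
+    ```python
+    >>> from datasets.table import InMemoryTable, list_table_cache_files
+    >>> list_table_cache_files(InMemoryTable.from_pydict({"a": [1, 2]}))
+    []
+    ```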
+ """
+ if isinstance(table, ConcatenationTable):
+ cache_files = []
+ for subtables in table.blocks:
+ for subtable in subtables:
+ cache_files += list_table_cache_files(subtable)
+ return cache_files
+ elif isinstance(table, MemoryMappedTable):
+ return [table.path]
+ else:
+ return []
+
+
+def _wrap_for_chunked_arrays(func):
+ """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""
+
+ def wrapper(array, *args, **kwargs):
+ if isinstance(array, pa.ChunkedArray):
+ return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
+ else:
+ return func(array, *args, **kwargs)
+
+ return wrapper
+
+
+def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool:
+ """Check if all the sub-lists of a `pa.ListArray` have the specified length."""
+ return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array)
+
+
+def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array:
+ """Add the null bitmap to the offsets of a `pa.ListArray`."""
+ offsets = array.offsets
+ if array.null_count > 0:
+ offsets = pa.concat_arrays(
+ [
+ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())),
+ offsets[-1:],
+ ]
+ )
+ return offsets
+
+
+def _storage_type(type: pa.DataType) -> pa.DataType:
+ """Convert a (possibly nested) `pa.ExtensionType` to its storage type."""
+ if isinstance(type, pa.ExtensionType):
+ return _storage_type(type.storage_type)
+ elif isinstance(type, pa.StructType):
+ return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type])
+ elif isinstance(type, pa.ListType):
+ return pa.list_(_storage_type(type.value_type))
+ elif isinstance(type, pa.FixedSizeListType):
+ return pa.list_(_storage_type(type.value_type), type.list_size)
+ return type
+
+
+@_wrap_for_chunked_arrays
+def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
+ """Improved version of `pa.Array.cast`
+
+ It supports casting `pa.StructArray` objects to re-order the fields.
+    It also lets you control certain aspects of the casting, e.g. whether
+    to disable casting numbers (`floats` or `ints`) to strings.
+
+ Args:
+        array (`pa.Array`):
+            PyArrow array to cast.
+        pa_type (`pa.DataType`):
+            Target PyArrow type.
+        allow_number_to_str (`bool`, defaults to `True`):
+            Whether to allow casting numbers to strings.
+
+ Raises:
+        `pa.ArrowInvalid`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from numbers to strings and `allow_number_to_str` is `False`
+
+ Returns:
+        `pa.Array`: the casted array
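+
+    Example (a minimal sketch; casting integers to strings, which can be disabled by passing
+    `allow_number_to_str=False`):
+    ```python
+    >>> import pyarrow as pa
+    >>> from datasets.table import array_cast
+    >>> array_cast(pa.array([1, 2, 3]), pa.string()).to_pylist()
+    ['1', '2', '3']
+    ```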
+ """
+ _c = partial(array_cast, allow_number_to_str=allow_number_to_str)
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if isinstance(pa_type, pa.ExtensionType):
+ return pa_type.wrap_array(_c(array, pa_type.storage_type))
+ elif array.type == pa_type:
+ return array
+ elif pa.types.is_struct(array.type):
+ if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(field.name), field.type) for field in pa_type]
+ return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if _are_list_values_of_length(array, pa_type.list_size):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = _c(array, storage_type)
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array = _c(array, array_type)
+ else:
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array_values = array.values
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+                    array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size)
+ elif pa.types.is_list(pa_type):
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type))
+ elif pa.types.is_fixed_size_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if pa_type.list_size == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ elif pa.types.is_list(pa_type):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null())
+ else:
+ if (
+ not allow_number_to_str
+ and pa.types.is_string(pa_type)
+ and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type))
+ ):
+ raise TypeError(
+ f"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}"
+ )
+ if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
+ raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
+ return array.cast(pa_type)
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
+
+
+@_wrap_for_chunked_arrays
+def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
+ """Cast an array to the arrow type that corresponds to the requested feature type.
+ For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
+ they defined to enable casting from other arrow types.
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array to cast.
+ feature (`datasets.features.FeatureType`):
+ The target feature type.
+        allow_number_to_str (`bool`, defaults to `True`):
+            Whether to allow casting numbers to strings.
+
+ Raises:
+        `pa.ArrowInvalid`: if the arrow data casting fails
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from numbers to strings and `allow_number_to_str` is `False`
+
+ Returns:
+ array (`pyarrow.Array`): the casted array
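+
+    Example (a minimal sketch using the plain `Value` feature type):
+    ```python
+    >>> import pyarrow as pa
+    >>> from datasets import Value
+    >>> from datasets.table import cast_array_to_feature
+    >>> cast_array_to_feature(pa.array([1, 2, 3]), Value("string")).to_pylist()
+    ['1', '2', '3']
+    ```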
+ """
+ from .features.features import Sequence, get_nested_type
+
+ _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str)
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "cast_storage"):
+ return feature.cast_storage(array)
+
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict) and {field.name for field in array.type} == set(feature):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ casted_array_values = _c(array.values, feature[0])
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if _are_list_values_of_length(array, feature.length):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = array_cast(array, storage_type, allow_number_to_str=allow_number_to_str)
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array = array_cast(array, array_type, allow_number_to_str=allow_number_to_str)
+ else:
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array_values = array.values
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+ array.offset * feature.length : (array.offset + len(array)) * feature.length
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
+ else:
+ casted_array_values = _c(array.values, feature.feature)
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null())
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if feature.length == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null())
+ if pa.types.is_null(array.type):
+ return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
+ elif not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
+
+
+@_wrap_for_chunked_arrays
+def embed_array_storage(array: pa.Array, feature: "FeatureType"):
+ """Embed data into an arrays's storage.
+ For custom features like Audio or Image, it takes into account the "embed_storage" methods
+ they define to embed external data (e.g. an image file) into an array.
+
+
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array in which to embed data.
+ feature (`datasets.features.FeatureType`):
+ Array features.
+
+ Raises:
+        `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+
+ Returns:
+        array (`pyarrow.Array`): the array with the embedded data
+ """
+ from .features import Sequence
+
+ _e = embed_array_storage
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "embed_storage"):
+ return feature.embed_storage(array)
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict):
+ arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ if isinstance(feature, list):
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0]))
+ if isinstance(feature, Sequence) and feature.length == -1:
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature))
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be Sequence(subfeature)
+ if isinstance(feature, Sequence) and feature.length > -1:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ embedded_array_values = _e(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[embedded_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null())
+ if not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array
+ raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
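+
+
+def _example_embed_array_storage(image_path):  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module.
+    # `image_path` is a placeholder for the path of an existing image file; the
+    # Image feature's `embed_storage` reads the file and stores its bytes inline.
+    from .features import Image
+
+    storage = pa.array([{"bytes": None, "path": image_path}], type=Image().pa_type)
+    embedded = embed_array_storage(storage, Image())
+    assert embedded.field("bytes")[0].as_py() is not None
+    return embedded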
+
+
+class CastError(ValueError):
+ """When it's not possible to cast an Arrow table to a specific schema or set of features"""
+
+ def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None:
+ super().__init__(*args)
+ self.table_column_names = table_column_names
+ self.requested_column_names = requested_column_names
+
+ def details(self):
+ new_columns = set(self.table_column_names) - set(self.requested_column_names)
+ missing_columns = set(self.requested_column_names) - set(self.table_column_names)
+ if new_columns and missing_columns:
+ return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
+        elif new_columns:
+            return f"there are {len(new_columns)} new columns ({', '.join(new_columns)})."
+        else:
+            return f"there are {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
+
+
+def cast_table_to_features(table: pa.Table, features: "Features"):
+ """Cast a table to the arrow schema that corresponds to the requested features.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ features ([`Features`]):
+ Target features.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
+
+def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
+    """Cast a table to the arrow schema. Unlike `cast_table_to_features`, this method can preserve nullability.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to cast.
+        schema (`pa.Schema`):
+            Target PyArrow schema.
+
+ Returns:
+ `pa.Table`: the casted table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(schema)
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=schema)
+
+
+def embed_table_storage(table: pa.Table):
+ """Embed external data into a table's storage.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table in which to embed data.
+
+ Returns:
+ table (`pyarrow.Table`): the table with embedded data
+ """
+ from .features.features import Features, require_storage_embed
+
+ features = Features.from_arrow_schema(table.schema)
+ arrays = [
+ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
+ for name, feature in features.items()
+ ]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
+
+def table_cast(table: pa.Table, schema: pa.Schema):
+ """Improved version of `pa.Table.cast`.
+
+ It supports casting to feature types stored in the schema metadata.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ schema (`pyarrow.Schema`):
+ Target PyArrow schema.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if table.schema != schema:
+ return cast_table_to_schema(table, schema)
+ elif table.schema.metadata != schema.metadata:
+ return table.replace_schema_metadata(schema.metadata)
+ else:
+ return table
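+
+
+def _example_table_cast():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # cast an int64 column to the schema produced by a target Features object.
+    from .features import Features, Value
+
+    table = pa.table({"id": [1, 2, 3]})
+    target_schema = Features({"id": Value("int32")}).arrow_schema
+    casted = table_cast(table, target_schema)
+    assert casted.schema.field("id").type == pa.int32()
+    return casted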
+
+
+def table_flatten(table: pa.Table):
+ """Improved version of `pa.Table.flatten`.
+
+    It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of the columns with a struct type into one column per struct field,
+    but it also updates the metadata and skips decodable features unless the `decode` attribute of these features is set to `False`.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to flatten.
+
+ Returns:
+ `Table`: the flattened table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(table.schema)
+ if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()):
+ flat_arrays = []
+ flat_column_names = []
+ for field in table.schema:
+ array = table.column(field.name)
+ subfeature = features[field.name]
+ if pa.types.is_struct(field.type) and (
+ not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature
+ ):
+ flat_arrays.extend(array.flatten())
+ flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type])
+ else:
+ flat_arrays.append(array)
+ flat_column_names.append(field.name)
+ flat_table = pa.Table.from_arrays(
+ flat_arrays,
+ names=flat_column_names,
+ )
+ else:
+ flat_table = table.flatten()
+ # Preserve complex types in the metadata
+ flat_features = features.flatten(max_depth=2)
+ flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names})
+ return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
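+
+
+def _example_table_flatten():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # a struct column is flattened into one column per subfield.
+    table = pa.table({"answers": [{"text": "42", "start": 7}]})
+    flat = table_flatten(table)
+    assert flat.column_names == ["answers.text", "answers.start"]
+    return flat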
+
+
+def table_visitor(table: pa.Table, function: Callable[[pa.Array, "FeatureType"], None]):
+ """Visit all arrays in a table and apply a function to them.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to visit.
+        function (`Callable[[pa.Array, FeatureType], None]`):
+            Function to apply to each array and its corresponding feature.
+ """
+ from .features import Features, Sequence
+
+ features = Features.from_arrow_schema(table.schema)
+
+ def _visit(array, feature):
+ if isinstance(array, pa.ChunkedArray):
+ for chunk in array.chunks:
+ _visit(chunk, feature)
+ else:
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ function(array, feature)
+ if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"):
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length)
+ for name, subfeature in feature.feature.items()
+ }
+ for name, subfeature in feature.items():
+ _visit(array.field(name), subfeature)
+ elif pa.types.is_list(array.type):
+ if isinstance(feature, list):
+ _visit(array.values, feature[0])
+ elif isinstance(feature, Sequence):
+ _visit(array.values, feature.feature)
+
+ for name, feature in features.items():
+ _visit(table[name], feature)
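+
+
+def _example_table_visitor():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # record the Arrow type of every (possibly nested) array visited in a table.
+    visited_types = []
+    table = pa.table({"tokens": [["a", "b"], ["c"]]})
+    table_visitor(table, lambda array, feature: visited_types.append(array.type))
+    assert pa.string() in visited_types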
+
+
+def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to iterate over.
+ batch_size (`int`):
+ Size of each sub-table to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+            Drop the last batch if it is smaller than `batch_size`.
+
+    Yields:
+        `pyarrow.Table`: sub-tables with at most `batch_size` rows each.
+    """
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for chunk in table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ chunks_buffer.append(chunk)
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ yield pa.Table.from_batches(chunks_buffer)
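+
+
+def _example_table_iter():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # iterate over an in-memory table in fixed-size batches. The column names
+    # and values are made up for the example.
+    tbl = pa.table({"id": list(range(5)), "text": ["a", "b", "c", "d", "e"]})
+    sizes = [len(batch) for batch in table_iter(tbl, batch_size=2)]
+    assert sizes == [2, 2, 1]
+    # With drop_last_batch=True the trailing 1-row sub-table is skipped.
+    sizes = [len(batch) for batch in table_iter(tbl, batch_size=2, drop_last_batch=True)]
+    assert sizes == [2, 2]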
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__init__.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a18a1e79349cfb32a743aeca4c3e9a809645a75
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__init__.py
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from ..utils.logging import get_logger
+from .audio_classification import AudioClassification
+from .automatic_speech_recognition import AutomaticSpeechRecognition
+from .base import TaskTemplate
+from .image_classification import ImageClassification
+from .language_modeling import LanguageModeling
+from .question_answering import QuestionAnsweringExtractive
+from .summarization import Summarization
+from .text_classification import TextClassification
+
+
+__all__ = [
+ "AutomaticSpeechRecognition",
+ "AudioClassification",
+ "ImageClassification",
+ "LanguageModeling",
+ "QuestionAnsweringExtractive",
+ "Summarization",
+ "TaskTemplate",
+ "TextClassification",
+]
+
+logger = get_logger(__name__)
+
+
+NAME2TEMPLATE = {
+ AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
+ AudioClassification.task: AudioClassification,
+ ImageClassification.task: ImageClassification,
+ LanguageModeling.task: LanguageModeling,
+ QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
+ Summarization.task: Summarization,
+ TextClassification.task: TextClassification,
+}
+
+
+def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
+ """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
+ task_name = task_template_dict.get("task")
+ if task_name is None:
+ logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
+ return None
+    template = NAME2TEMPLATE.get(task_name)
+    if template is None:
+        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
+        return None
+    return template.from_dict(task_template_dict)
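+
+
+def _example_task_template_from_dict():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # build a text-classification template from a plain dictionary, e.g. one read
+    # from a dataset card. The column names are made up for the example.
+    template = task_template_from_dict(
+        {"task": "text-classification", "text_column": "sentence", "label_column": "label"}
+    )
+    assert isinstance(template, TextClassification)
+    assert template.column_mapping == {"sentence": "text", "label": "labels"}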
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8a58ad4049cf14cf60aac7f75abd066250c17df
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48de487f77bc60c902cdc9c8c1c3962e5a4116ac
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5382283a72c992c4445474d8fd5429da4b78ec85
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6a6957523db838028048e04b540502d0e3ec11d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cca2146e39450bbcbc39a5204e7f989632e485f3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a08c31fef2a2c357745505e2a7a7b730f5b33b2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c9ec3c1b972a5507700ef88813c45e7cc257bbd
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf6275615b0e9bc89bf19128541b02736b9fe5bb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1d0a348b9915991138e18d5e9b68601fdda0b91
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/audio_classification.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f9fe402f3814b4db0eb1832405adcfaef77503e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, ClassLabel, Features
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AudioClassification(TaskTemplate):
+ task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ audio_column: str = "audio"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.audio_column: "audio",
+ self.label_column: "labels",
+ }
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
new file mode 100644
index 0000000000000000000000000000000000000000..103a98a1bc9774de6b652bbc69b41501a419f0f8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
@@ -0,0 +1,30 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AutomaticSpeechRecognition(TaskTemplate):
+ task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
+ audio_column: str = "audio"
+ transcription_column: str = "transcription"
+
+ def align_with_features(self, features):
+ if self.audio_column not in features:
+ raise ValueError(f"Column {self.audio_column} is not present in features.")
+ if not isinstance(features[self.audio_column], Audio):
+ raise ValueError(f"Column {self.audio_column} is not an Audio type.")
+ task_template = copy.deepcopy(self)
+ input_schema = self.input_schema.copy()
+ input_schema["audio"] = features[self.audio_column]
+ task_template.__dict__["input_schema"] = input_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.audio_column: "audio", self.transcription_column: "transcription"}
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/base.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a5337ffc0784a1ed12f4617a9a0ef6ba7253e5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/base.py
@@ -0,0 +1,39 @@
+import abc
+import copy
+import dataclasses
+from dataclasses import dataclass
+from typing import ClassVar, Dict, Type, TypeVar
+
+from ..features import Features
+
+
+T = TypeVar("T", bound="TaskTemplate")
+
+
+@dataclass(frozen=True)
+class TaskTemplate(abc.ABC):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str
+ input_schema: ClassVar[Features]
+ label_schema: ClassVar[Features]
+
+ def align_with_features(self: T, features: Features) -> T:
+ """
+ Align features with the task template.
+ """
+ # No-op
+ return copy.deepcopy(self)
+
+ @property
+ def features(self) -> Features:
+ return Features(**self.input_schema, **self.label_schema)
+
+ @property
+ @abc.abstractmethod
+ def column_mapping(self) -> Dict[str, str]:
+ raise NotImplementedError
+
+ @classmethod
+ def from_dict(cls: Type[T], template_dict: dict) -> T:
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in template_dict.items() if k in field_names})
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/image_classification.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/image_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..20a19e0408a7ec8061ac4fac700d83e6dcbadcdf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/image_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Image
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class ImageClassification(TaskTemplate):
+ task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"image": Image()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ image_column: str = "image"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.image_column: "image",
+ self.label_column: "labels",
+ }
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/language_modeling.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2837744fa1718e57ffbeeca1a6e9a60c9468d8f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class LanguageModeling(TaskTemplate):
+ task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
+
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({})
+ text_column: str = "text"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text"}
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/question_answering.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..349fd54141762631eec025681015cedd97c23b63
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/question_answering.py
@@ -0,0 +1,29 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Sequence, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class QuestionAnsweringExtractive(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
+ label_schema: ClassVar[Features] = Features(
+ {
+ "answers": Sequence(
+ {
+ "text": Value("string"),
+ "answer_start": Value("int32"),
+ }
+ )
+ }
+ )
+ question_column: str = "question"
+ context_column: str = "context"
+ answers_column: str = "answers"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/summarization.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0057b07b4f62947c1bfde1962bf06be1427c363
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/summarization.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class Summarization(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"summary": Value("string")})
+ text_column: str = "text"
+ summary_column: str = "summary"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text", self.summary_column: "summary"}
diff --git a/env-llmeval/lib/python3.10/site-packages/datasets/tasks/text_classification.py b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/text_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..13584b73e8ae668bd6c145b60598cd6859be5146
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/datasets/tasks/text_classification.py
@@ -0,0 +1,34 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class TextClassification(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ text_column: str = "text"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.text_column: "text",
+ self.label_column: "labels",
+ }
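+
+
+def _example_align_with_features():  # pragma: no cover
+    # Illustrative sketch added by the editor, not part of the upstream module:
+    # align the template's label schema with the ClassLabel of an actual dataset.
+    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
+    template = TextClassification().align_with_features(features)
+    assert template.label_schema["labels"].names == ["neg", "pos"]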
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..1eb999e0babe28897c4544d034b36f5f0fe77ca6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2004-2016 California Institute of Technology.
+Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+All rights reserved.
+
+This software is available subject to the conditions and terms laid
+out below. By downloading and using this software you are agreeing
+to the following conditions.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ - Neither the names of the copyright holders nor the names of any of
+ the contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..79509850401450817ad338cd856c35cf78c2b3e4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/METADATA
@@ -0,0 +1,280 @@
+Metadata-Version: 2.1
+Name: dill
+Version: 0.3.8
+Summary: serialize all of Python
+Home-page: https://github.com/uqfoundation/dill
+Author: Mike McKerns
+Author-email: mmckerns@uqfoundation.org
+Maintainer: Mike McKerns
+Maintainer-email: mmckerns@uqfoundation.org
+License: BSD-3-Clause
+Download-URL: https://pypi.org/project/dill/#files
+Project-URL: Documentation, http://dill.rtfd.io
+Project-URL: Source Code, https://github.com/uqfoundation/dill
+Project-URL: Bug Tracker, https://github.com/uqfoundation/dill/issues
+Platform: Linux
+Platform: Windows
+Platform: Mac
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Software Development
+Requires-Python: >=3.8
+Provides-Extra: graph
+Requires-Dist: objgraph (>=1.7.2) ; extra == 'graph'
+Provides-Extra: profile
+Requires-Dist: gprof2dot (>=2022.7.29) ; extra == 'profile'
+Provides-Extra: readline
+
+-----------------------------
+dill: serialize all of Python
+-----------------------------
+
+About Dill
+==========
+
+``dill`` extends Python's ``pickle`` module for serializing and de-serializing
+Python objects to the majority of the built-in Python types. Serialization
+is the process of converting an object to a byte stream, and the inverse
+of which is converting a byte stream back to a Python object hierarchy.
+
+``dill`` provides the user the same interface as the ``pickle`` module, and
+also includes some additional features. In addition to pickling Python
+objects, ``dill`` provides the ability to save the state of an interpreter
+session in a single command. Hence, it would be feasible to save an
+interpreter session, close the interpreter, ship the pickled file to
+another computer, open a new interpreter, unpickle the session and
+thus continue from the 'saved' state of the original interpreter
+session.
+
+``dill`` can be used to store Python objects to a file, but the primary
+usage is to send Python objects across the network as a byte stream.
+``dill`` is quite flexible, and allows arbitrary user defined classes
+and functions to be serialized. Thus ``dill`` is not intended to be
+secure against erroneously or maliciously constructed data. It is
+left to the user to decide whether the data they unpickle is from
+a trustworthy source.
+
+``dill`` is part of ``pathos``, a Python framework for heterogeneous computing.
+``dill`` is in active development, so any user feedback, bug reports, comments,
+or suggestions are highly appreciated. A list of issues is located at
+https://github.com/uqfoundation/dill/issues, with a legacy list maintained at
+https://uqfoundation.github.io/project/pathos/query.
+
+
+Major Features
+==============
+
+``dill`` can pickle the following standard types:
+
+ - none, type, bool, int, float, complex, bytes, str,
+ - tuple, list, dict, file, buffer, builtin,
+ - Python classes, namedtuples, dataclasses, metaclasses,
+ - instances of classes,
+ - set, frozenset, array, functions, exceptions
+
+``dill`` can also pickle more 'exotic' standard types:
+
+ - functions with yields, nested functions, lambdas,
+ - cell, method, unboundmethod, module, code, methodwrapper,
+ - methoddescriptor, getsetdescriptor, memberdescriptor, wrapperdescriptor,
+ - dictproxy, slice, notimplemented, ellipsis, quit
+
+``dill`` cannot yet pickle these standard types:
+
+ - frame, generator, traceback
+
+``dill`` also provides the capability to:
+
+ - save and load Python interpreter sessions
+ - save and extract the source code from functions and classes
+ - interactively diagnose pickling errors
+
+
+Current Release
+===============
+
+The latest released version of ``dill`` is available from:
+
+ https://pypi.org/project/dill
+
+``dill`` is distributed under a 3-clause BSD license.
+
+
+Development Version
+===================
+
+You can get the latest development version with all the shiny new features at:
+
+ https://github.com/uqfoundation
+
+If you have a new contribution, please submit a pull request.
+
+
+Installation
+============
+
+``dill`` can be installed with ``pip``::
+
+ $ pip install dill
+
+To optionally include the ``objgraph`` diagnostic tool in the install::
+
+ $ pip install dill[graph]
+
+To optionally include the ``gprof2dot`` diagnostic tool in the install::
+
+ $ pip install dill[profile]
+
+For Windows users, to optionally install session history tools::
+
+ $ pip install dill[readline]
+
+
+Requirements
+============
+
+``dill`` requires:
+
+ - ``python`` (or ``pypy``), **>=3.8**
+ - ``setuptools``, **>=42**
+
+Optional requirements:
+
+ - ``objgraph``, **>=1.7.2**
+ - ``gprof2dot``, **>=2022.7.29**
+  - ``pyreadline``, **>=1.7.1** (on Windows)
+
+
+Basic Usage
+===========
+
+``dill`` is a drop-in replacement for ``pickle``. Existing code can be
+updated to allow complete pickling using::
+
+ >>> import dill as pickle
+
+or::
+
+ >>> from dill import dumps, loads
+
+``dumps`` converts the object to a unique byte string, and ``loads`` performs
+the inverse operation::
+
+ >>> squared = lambda x: x**2
+ >>> loads(dumps(squared))(3)
+ 9
+
+There are a number of options to control serialization, provided as keyword
+arguments to several ``dill`` functions (a short example follows this list):
+
+* with *protocol*, the pickle protocol level can be set. This uses the
+ same value as the ``pickle`` module, *DEFAULT_PROTOCOL*.
+* with *byref=True*, ``dill`` behaves much more like pickle, with certain
+  objects (like modules) pickled by reference rather than by attempting to
+  pickle the object itself.
+* with *recurse=True*, objects referred to in the global dictionary are
+ recursively traced and pickled, instead of the default behavior of
+ attempting to store the entire global dictionary.
+* with *fmode*, the contents of the file can be pickled along with the file
+ handle, which is useful if the object is being sent over the wire to a
+ remote system which does not have the original file on disk. Options are
+ *HANDLE_FMODE* for just the handle, *CONTENTS_FMODE* for the file content
+ and *FILE_FMODE* for content and handle.
+* with *ignore=False*, objects reconstructed with types defined in the
+ top-level script environment use the existing type in the environment
+ rather than a possibly different reconstructed type.
+
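+
+For example, these options can be passed directly as keyword arguments (a
+minimal, illustrative sketch)::
+
+    >>> import dill
+    >>> squared = lambda x: x**2
+    >>> payload = dill.dumps(squared, recurse=True, byref=False)
+    >>> dill.loads(payload)(3)
+    9
+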
+The default serialization can also be set globally in *dill.settings*.
+Thus, we can modify how ``dill`` handles references to the global dictionary
+locally or globally::
+
+ >>> import dill.settings
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
+ False
+ >>> dill.settings['recurse'] = True
+ >>> dumps(absolute) == dumps(absolute, recurse=True)
+ True
+
+``dill`` also includes source code inspection, as an alternate to pickling::
+
+ >>> import dill.source
+ >>> print(dill.source.getsource(squared))
+ squared = lambda x:x**2
+
+To aid in debugging pickling issues, use *dill.detect* which provides
+tools like pickle tracing::
+
+ >>> import dill.detect
+ >>> with dill.detect.trace():
+ >>> dumps(squared)
+    ┬ F1: <function <lambda> at 0x7fe074f8c280>
+ ├┬ F2:
+ │└ # F2 [34 B]
+    ├┬ Co: <code object <lambda> at 0x7fe07501eb30, file "<stdin>", line 1>
+ │├┬ F2:
+ ││└ # F2 [19 B]
+ │└ # Co [87 B]
+ ├┬ D1:
+ │└ # D1 [22 B]
+ ├┬ D2:
+ │└ # D2 [2 B]
+ ├┬ D2:
+ │├┬ D2:
+ ││└ # D2 [2 B]
+ │└ # D2 [23 B]
+    └ # F1 [180 B]
+
+With trace, we see how ``dill`` stored the lambda (``F1``) by first storing
+``_create_function``, the underlying code object (``Co``) and ``_create_code``
+(which is used to handle code objects), then we handle the reference to
+the global dict (``D2``) plus other dictionaries (``D1`` and ``D2``) that
+save the lambda object's state. A ``#`` marks when the object is actually stored.
+
+
+More Information
+================
+
+Probably the best way to get started is to look at the documentation at
+http://dill.rtfd.io. Also see ``dill.tests`` for a set of scripts that
+demonstrate how ``dill`` can serialize different Python objects. You can
+run the test suite with ``python -m dill.tests``. The contents of any
+pickle file can be examined with ``undill``. As ``dill`` conforms to
+the ``pickle`` interface, the examples and documentation found at
+http://docs.python.org/library/pickle.html also apply to ``dill``
+if one will ``import dill as pickle``. The source code is also generally
+well documented, so further questions may be resolved by inspecting the
+code itself. Please feel free to submit a ticket on github, or ask a
+question on stackoverflow (**@Mike McKerns**).
+If you would like to share how you use ``dill`` in your work, please send
+an email (to **mmckerns at uqfoundation dot org**).
+
+
+Citation
+========
+
+If you use ``dill`` to do research that leads to publication, we ask that you
+acknowledge use of ``dill`` by citing the following in your publication::
+
+ M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
+ "Building a framework for predictive science", Proceedings of
+ the 10th Python in Science Conference, 2011;
+ http://arxiv.org/pdf/1202.1056
+
+ Michael McKerns and Michael Aivazis,
+ "pathos: a framework for heterogeneous computing", 2010- ;
+ https://uqfoundation.github.io/project/pathos
+
+Please see https://uqfoundation.github.io/project/pathos or
+http://arxiv.org/pdf/1202.1056 for further information.
+
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..5d23e0318a475f2fe9f1d70decf1ac75bf4bd425
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/RECORD
@@ -0,0 +1,97 @@
+../../../bin/get_gprof,sha256=5UXwSf1BcfNtv4U5oGL8yBcORUJXKeOKS_CAK2mS76Y,2447
+../../../bin/get_objgraph,sha256=i9nSmF-NxOfqVVATQhW8k0UWRPiPbqvGX0gh9rOal4A,1641
+../../../bin/undill,sha256=4LwLIDxWu23zePFX3C_90CVZcMGl9hJuH0jLnmUq3Ks,577
+dill-0.3.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+dill-0.3.8.dist-info/LICENSE,sha256=UeiKI-eId86r1yfCGcel4z9l2pugOsT9KFupBKoc4is,1790
+dill-0.3.8.dist-info/METADATA,sha256=UxkSs2cU8JyrJsV5kS0QR9crJ07hrUJS2RiIMQaC4ss,10106
+dill-0.3.8.dist-info/RECORD,,
+dill-0.3.8.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+dill-0.3.8.dist-info/top_level.txt,sha256=HLSIyYIjQzJiBvs3_-16ntezE3j6mWGTW0DT1xDd7X0,5
+dill/__diff.py,sha256=kirMxzB7E8lfjo21M5oIf7if95ny0aWhYB790KMpN08,7143
+dill/__info__.py,sha256=Kmel_yLTyH-hwNC5cVfzN-LV08AbS_AvSa2uwMeIQdk,10756
+dill/__init__.py,sha256=j-Jxl3H6bxatS0h2f8ywWs7DChwk7B9ozuZQBVcjYGU,3798
+dill/__pycache__/__diff.cpython-310.pyc,,
+dill/__pycache__/__info__.cpython-310.pyc,,
+dill/__pycache__/__init__.cpython-310.pyc,,
+dill/__pycache__/_dill.cpython-310.pyc,,
+dill/__pycache__/_objects.cpython-310.pyc,,
+dill/__pycache__/_shims.cpython-310.pyc,,
+dill/__pycache__/detect.cpython-310.pyc,,
+dill/__pycache__/logger.cpython-310.pyc,,
+dill/__pycache__/objtypes.cpython-310.pyc,,
+dill/__pycache__/pointers.cpython-310.pyc,,
+dill/__pycache__/session.cpython-310.pyc,,
+dill/__pycache__/settings.cpython-310.pyc,,
+dill/__pycache__/source.cpython-310.pyc,,
+dill/__pycache__/temp.cpython-310.pyc,,
+dill/_dill.py,sha256=3Eo6gKj1sODJjgPgYNT8TU-YL6QNQ7rIeWPUVnRzyqQ,88548
+dill/_objects.py,sha256=dPlUXzQIh8CA0fMy9NMbwwLGUPmXe5H8MdQtRWB1b_M,19605
+dill/_shims.py,sha256=IuzQcyPET5VWmWMoSGStieoedvNXlb5suDpa4bykTbQ,6635
+dill/detect.py,sha256=Mb-PfCxn1mg0l3TmHXyPNVEc4n3fuxc_nue6eL3-q_o,11114
+dill/logger.py,sha256=YS5ZloAOKjJRZaOBRCaMUDWmWVQZcicvbXVSrz8L8XU,11134
+dill/objtypes.py,sha256=BamGH3BEM6lLlxisuvXcGjsCRLNeoLs4_rFZrM5r2yM,736
+dill/pointers.py,sha256=vnQzjwGtKMGnmbdYRXRWNLMyceNPSw4f7UpvwCXLYbE,4467
+dill/session.py,sha256=NvCWpoP9r_rGBL2pOwwxOri8mFly5KlIWG3GwkBFnc0,23525
+dill/settings.py,sha256=7I3yvSpPKstOqpoW2gv3X77kXK-hZlqCnF7nJUGhxTY,630
+dill/source.py,sha256=DWfIxcBjpjbbKYz2DstV9kRdjajBdZLOcLXfsZsPo9U,45121
+dill/temp.py,sha256=KJUry4t0UjQCh5t4LXcxNyMF_uOGHwcjTuNYTJD9qdA,8027
+dill/tests/__init__.py,sha256=Gx-chVB-l-e7ncsGp2zF4BimTjbUyO7BY7RkrO835vY,479
+dill/tests/__main__.py,sha256=fHhioQwcOvTPlf1RM_wVQ0Y3ndETWJOuXJQ2rVtqliA,899
+dill/tests/__pycache__/__init__.cpython-310.pyc,,
+dill/tests/__pycache__/__main__.cpython-310.pyc,,
+dill/tests/__pycache__/test_abc.cpython-310.pyc,,
+dill/tests/__pycache__/test_check.cpython-310.pyc,,
+dill/tests/__pycache__/test_classdef.cpython-310.pyc,,
+dill/tests/__pycache__/test_dataclasses.cpython-310.pyc,,
+dill/tests/__pycache__/test_detect.cpython-310.pyc,,
+dill/tests/__pycache__/test_dictviews.cpython-310.pyc,,
+dill/tests/__pycache__/test_diff.cpython-310.pyc,,
+dill/tests/__pycache__/test_extendpickle.cpython-310.pyc,,
+dill/tests/__pycache__/test_fglobals.cpython-310.pyc,,
+dill/tests/__pycache__/test_file.cpython-310.pyc,,
+dill/tests/__pycache__/test_functions.cpython-310.pyc,,
+dill/tests/__pycache__/test_functors.cpython-310.pyc,,
+dill/tests/__pycache__/test_logger.cpython-310.pyc,,
+dill/tests/__pycache__/test_mixins.cpython-310.pyc,,
+dill/tests/__pycache__/test_module.cpython-310.pyc,,
+dill/tests/__pycache__/test_moduledict.cpython-310.pyc,,
+dill/tests/__pycache__/test_nested.cpython-310.pyc,,
+dill/tests/__pycache__/test_objects.cpython-310.pyc,,
+dill/tests/__pycache__/test_properties.cpython-310.pyc,,
+dill/tests/__pycache__/test_pycapsule.cpython-310.pyc,,
+dill/tests/__pycache__/test_recursive.cpython-310.pyc,,
+dill/tests/__pycache__/test_registered.cpython-310.pyc,,
+dill/tests/__pycache__/test_restricted.cpython-310.pyc,,
+dill/tests/__pycache__/test_selected.cpython-310.pyc,,
+dill/tests/__pycache__/test_session.cpython-310.pyc,,
+dill/tests/__pycache__/test_source.cpython-310.pyc,,
+dill/tests/__pycache__/test_temp.cpython-310.pyc,,
+dill/tests/__pycache__/test_weakref.cpython-310.pyc,,
+dill/tests/test_abc.py,sha256=BSjSKKCQ5_iPfFxAd0yBq4KSAJxelrlC3IzoAhjd1C4,4227
+dill/tests/test_check.py,sha256=4F5gkX6zxY7C5sD2_0Tkqf3T3jmQl0K15FOxYUTZQl0,1396
+dill/tests/test_classdef.py,sha256=fI3fVk4SlsjNMMs5RfU6DUCaxpP7YYRjvLZ2nhXMHuc,8600
+dill/tests/test_dataclasses.py,sha256=yKjFuG24ymLtjk-sZZdhvNY7aDqerTDpMcfi_eV4ft0,890
+dill/tests/test_detect.py,sha256=sE9THufHXCDysBPQ4QkN5DHn6DaIldVRAEciseIRH08,4083
+dill/tests/test_dictviews.py,sha256=Jhol0cQWPwoQrp7OPxGhU8FNRX2GgfFp9fTahCvQEPA,1337
+dill/tests/test_diff.py,sha256=5VIWf2fpV6auLHNfzkHLTrgx6AJBlE2xe5Wanfmq8TM,2667
+dill/tests/test_extendpickle.py,sha256=gONrMBHO94Edhnqm1wo49hgzwmaxHs7L-86Hs-7albY,1315
+dill/tests/test_fglobals.py,sha256=DCvdojmKcLN_X9vX4Qe1FbsqjeoJK-wsY2uJwBfNFro,1676
+dill/tests/test_file.py,sha256=jUU2h8qaDOIe1mn_Ng7wqCZcd7Ucx3TAaI-K_90_Tbk,13578
+dill/tests/test_functions.py,sha256=-mqTpUbzRu8GynjBGD25dRDm8qInIe07sRZmCcA_iXY,4267
+dill/tests/test_functors.py,sha256=7rx9wLmrgFwF0gUm_-SGOISPYSok0XjmrQ-jFMRt6gs,930
+dill/tests/test_logger.py,sha256=D9zGRaA-CEadG13orPS_D4gPVZlkqXf9Zu8wn2oMiYc,2385
+dill/tests/test_mixins.py,sha256=YtB24BjodooLj85ijFbAxiM7LlFQZAUL8RQVx9vIAwY,4007
+dill/tests/test_module.py,sha256=KLl_gZJJqDY7S_bD5wCqKL8JQCS0MDMoipVQSDfASlo,1943
+dill/tests/test_moduledict.py,sha256=faXG6-5AcmCfP3xe2FYGOUdSosU-9TWnKU_ZVqPDaxY,1182
+dill/tests/test_nested.py,sha256=ViWiOrChLZktS0z6qyKqMxDdTuy9kAX4qMgH_OreMcc,3146
+dill/tests/test_objects.py,sha256=pPAth0toC_UWztuKHC7NZlsRBb0g_gSAt70UbUtXEXo,1931
+dill/tests/test_properties.py,sha256=h35c-lYir1JG6oLPtrA0eYE0xoSohIimsA3yIfRw6yA,1346
+dill/tests/test_pycapsule.py,sha256=EXFyB6g1Wx9O9LM6StIeUKhrhln4_hou1xrtGwkt4Cw,1417
+dill/tests/test_recursive.py,sha256=bfr-BsK1Xu0PU7l2srHsDXdY2l1LeM3L3w7NraXO0cc,4182
+dill/tests/test_registered.py,sha256=J3oku053VfdJgYh4Z5_kyFRf-C52JglIzjcyxEaYOhk,1573
+dill/tests/test_restricted.py,sha256=xLMIae8sYJksAj9hKKyHFHIL8vtbGpFeOULz59snYM4,783
+dill/tests/test_selected.py,sha256=Hp-AAd6Qp5FJZ-vY_Bbejo5Rg6xFstec5QkSg5D7Aac,3218
+dill/tests/test_session.py,sha256=KoSPvs4c4VJ8mFMF7EUlD_3GwcOhhipt9fqHr--Go-4,10161
+dill/tests/test_source.py,sha256=wZTYBbpzUwj3Mz5OjrHQKfskaVVwuy2UQDg5p2wLbT4,6036
+dill/tests/test_temp.py,sha256=F_7nJkSetLIBSAYMw1-hYh03iVrEYwGs-4GIUzoBOfY,2619
+dill/tests/test_weakref.py,sha256=mrjZP5aPtUP1wBD6ibPsDsfI9ffmq_Ykt7ltoodi5Lg,1602
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85eea7018a40c657c08ef73fcf3a39024b2df2cb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/dill-0.3.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+dill
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19ea97f009a142a6cb55ef0928e5f18c367ba90c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb28b108ae3cbae53bab8ca3673283e7e8727c3b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84c6ad899121d8fddd6e838d4279e285db851ccb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..145b512108bcdeccff1eecb009612cb5ca5bf1d4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a3598ead6cc09c54a6ee85d36f64d2ff1374f2d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5edc40bff178aac0beb822f55df77e53e24032a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_clique.py b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bee210982888a142f07a043bbde24bdad80fae9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_clique.py
@@ -0,0 +1,291 @@
+import pytest
+
+import networkx as nx
+from networkx import convert_node_labels_to_integers as cnlti
+
+
+class TestCliques:
+ def setup_method(self):
+ z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1]
+ self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1)
+ self.cl = list(nx.find_cliques(self.G))
+ H = nx.complete_graph(6)
+ H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)})
+ H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)])
+ self.H = H
+
+ def test_find_cliques1(self):
+ cl = list(nx.find_cliques(self.G))
+ rcl = nx.find_cliques_recursive(self.G)
+ expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]]
+ assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl))
+ assert sorted(map(sorted, cl)) == sorted(map(sorted, expected))
+
+ def test_selfloops(self):
+ self.G.add_edge(1, 1)
+ cl = list(nx.find_cliques(self.G))
+ rcl = list(nx.find_cliques_recursive(self.G))
+ assert set(map(frozenset, cl)) == set(map(frozenset, rcl))
+ answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}]
+ assert len(answer) == len(cl)
+ assert all(set(c) in answer for c in cl)
+
+ def test_find_cliques2(self):
+ hcl = list(nx.find_cliques(self.H))
+ assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]]
+
+ def test_find_cliques3(self):
+ # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]]
+
+ cl = list(nx.find_cliques(self.G, [2]))
+ rcl = nx.find_cliques_recursive(self.G, [2])
+ expected = [[2, 6, 1, 3], [2, 6, 4]]
+ assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected))
+ assert sorted(map(sorted, cl)) == sorted(map(sorted, expected))
+
+ cl = list(nx.find_cliques(self.G, [2, 3]))
+ rcl = nx.find_cliques_recursive(self.G, [2, 3])
+ expected = [[2, 6, 1, 3]]
+ assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected))
+ assert sorted(map(sorted, cl)) == sorted(map(sorted, expected))
+
+ cl = list(nx.find_cliques(self.G, [2, 6, 4]))
+ rcl = nx.find_cliques_recursive(self.G, [2, 6, 4])
+ expected = [[2, 6, 4]]
+ assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected))
+ assert sorted(map(sorted, cl)) == sorted(map(sorted, expected))
+
+ with pytest.raises(ValueError):
+ list(nx.find_cliques(self.G, [2, 6, 4, 1]))
+
+ with pytest.raises(ValueError):
+ list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1]))
+
+ def test_number_of_cliques(self):
+ G = self.G
+ assert nx.number_of_cliques(G, 1) == 1
+ assert list(nx.number_of_cliques(G, [1]).values()) == [1]
+ assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2]
+ assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2}
+ assert nx.number_of_cliques(G, 2) == 2
+ assert nx.number_of_cliques(G) == {
+ 1: 1,
+ 2: 2,
+ 3: 1,
+ 4: 2,
+ 5: 1,
+ 6: 2,
+ 7: 1,
+ 8: 1,
+ 9: 1,
+ 10: 1,
+ 11: 1,
+ }
+ assert nx.number_of_cliques(G, nodes=list(G)) == {
+ 1: 1,
+ 2: 2,
+ 3: 1,
+ 4: 2,
+ 5: 1,
+ 6: 2,
+ 7: 1,
+ 8: 1,
+ 9: 1,
+ 10: 1,
+ 11: 1,
+ }
+ assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2}
+ assert nx.number_of_cliques(G, cliques=self.cl) == {
+ 1: 1,
+ 2: 2,
+ 3: 1,
+ 4: 2,
+ 5: 1,
+ 6: 2,
+ 7: 1,
+ 8: 1,
+ 9: 1,
+ 10: 1,
+ 11: 1,
+ }
+ assert nx.number_of_cliques(G, list(G), cliques=self.cl) == {
+ 1: 1,
+ 2: 2,
+ 3: 1,
+ 4: 2,
+ 5: 1,
+ 6: 2,
+ 7: 1,
+ 8: 1,
+ 9: 1,
+ 10: 1,
+ 11: 1,
+ }
+
+ def test_node_clique_number(self):
+ G = self.G
+ assert nx.node_clique_number(G, 1) == 4
+ assert list(nx.node_clique_number(G, [1]).values()) == [4]
+ assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4]
+ assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4}
+ assert nx.node_clique_number(G, 1) == 4
+ assert nx.node_clique_number(G) == {
+ 1: 4,
+ 2: 4,
+ 3: 4,
+ 4: 3,
+ 5: 3,
+ 6: 4,
+ 7: 3,
+ 8: 2,
+ 9: 2,
+ 10: 2,
+ 11: 2,
+ }
+ assert nx.node_clique_number(G, cliques=self.cl) == {
+ 1: 4,
+ 2: 4,
+ 3: 4,
+ 4: 3,
+ 5: 3,
+ 6: 4,
+ 7: 3,
+ 8: 2,
+ 9: 2,
+ 10: 2,
+ 11: 2,
+ }
+ assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4}
+ assert nx.node_clique_number(G, 1, cliques=self.cl) == 4
+
+ def test_make_clique_bipartite(self):
+ G = self.G
+ B = nx.make_clique_bipartite(G)
+ assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+ # Project onto the nodes of the original graph.
+ H = nx.projected_graph(B, range(1, 12))
+ assert H.adj == G.adj
+ # Project onto the nodes representing the cliques.
+ H1 = nx.projected_graph(B, range(-5, 0))
+ # Relabel the negative numbers as positive ones.
+ H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)})
+ assert sorted(H1) == [1, 2, 3, 4, 5]
+
+ def test_make_max_clique_graph(self):
+ """Tests that the maximal clique graph is the same as the bipartite
+ clique graph after being projected onto the nodes representing the
+ cliques.
+
+ """
+ G = self.G
+ B = nx.make_clique_bipartite(G)
+ # Project onto the nodes representing the cliques.
+ H1 = nx.projected_graph(B, range(-5, 0))
+ # Relabel the negative numbers as nonnegative ones, starting at
+ # 0.
+ H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)})
+ H2 = nx.make_max_clique_graph(G)
+ assert H1.adj == H2.adj
+
+ def test_directed(self):
+ with pytest.raises(nx.NetworkXNotImplemented):
+ next(nx.find_cliques(nx.DiGraph()))
+
+ def test_find_cliques_trivial(self):
+ G = nx.Graph()
+ assert sorted(nx.find_cliques(G)) == []
+ assert sorted(nx.find_cliques_recursive(G)) == []
+
+ def test_make_max_clique_graph_create_using(self):
+ G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)])
+ E = nx.Graph([(0, 1), (0, 2), (1, 2)])
+ E.add_node(3)
+ assert nx.is_isomorphic(nx.make_max_clique_graph(G, create_using=nx.Graph), E)
+
+
+class TestEnumerateAllCliques:
+ def test_paper_figure_4(self):
+ # Same graph as given in Fig. 4 of the paper on which
+ # enumerate_all_cliques is based.
+ # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129
+ G = nx.Graph()
+ edges_fig_4 = [
+ ("a", "b"),
+ ("a", "c"),
+ ("a", "d"),
+ ("a", "e"),
+ ("b", "c"),
+ ("b", "d"),
+ ("b", "e"),
+ ("c", "d"),
+ ("c", "e"),
+ ("d", "e"),
+ ("f", "b"),
+ ("f", "c"),
+ ("f", "g"),
+ ("g", "f"),
+ ("g", "c"),
+ ("g", "d"),
+ ("g", "e"),
+ ]
+ G.add_edges_from(edges_fig_4)
+
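+ # enumerate_all_cliques yields cliques in nondecreasing order of size
+ # (all 1-cliques, then all 2-cliques, ...), which the sortedness check
+ # below verifies.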
+ cliques = list(nx.enumerate_all_cliques(G))
+ clique_sizes = list(map(len, cliques))
+ assert sorted(clique_sizes) == clique_sizes
+
+ expected_cliques = [
+ ["a"],
+ ["b"],
+ ["c"],
+ ["d"],
+ ["e"],
+ ["f"],
+ ["g"],
+ ["a", "b"],
+ ["a", "b", "d"],
+ ["a", "b", "d", "e"],
+ ["a", "b", "e"],
+ ["a", "c"],
+ ["a", "c", "d"],
+ ["a", "c", "d", "e"],
+ ["a", "c", "e"],
+ ["a", "d"],
+ ["a", "d", "e"],
+ ["a", "e"],
+ ["b", "c"],
+ ["b", "c", "d"],
+ ["b", "c", "d", "e"],
+ ["b", "c", "e"],
+ ["b", "c", "f"],
+ ["b", "d"],
+ ["b", "d", "e"],
+ ["b", "e"],
+ ["b", "f"],
+ ["c", "d"],
+ ["c", "d", "e"],
+ ["c", "d", "e", "g"],
+ ["c", "d", "g"],
+ ["c", "e"],
+ ["c", "e", "g"],
+ ["c", "f"],
+ ["c", "f", "g"],
+ ["c", "g"],
+ ["d", "e"],
+ ["d", "e", "g"],
+ ["d", "g"],
+ ["e", "g"],
+ ["f", "g"],
+ ["a", "b", "c"],
+ ["a", "b", "c", "d"],
+ ["a", "b", "c", "d", "e"],
+ ["a", "b", "c", "e"],
+ ]
+
+ assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques))
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_cluster.py b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..b656ba81553bc9bdca1fe828c5e9c01427c43555
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_cluster.py
@@ -0,0 +1,549 @@
+import pytest
+
+import networkx as nx
+
+
+class TestTriangles:
+ def test_empty(self):
+ G = nx.Graph()
+ assert list(nx.triangles(G).values()) == []
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ assert nx.triangles(G) == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0]
+ assert nx.triangles(G, 1) == 0
+ assert list(nx.triangles(G, [1, 2]).values()) == [0, 0]
+ assert nx.triangles(G, 1) == 0
+ assert nx.triangles(G, [1, 2]) == {1: 0, 2: 0}
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert list(nx.triangles(G).values()) == [6, 6, 6, 6, 6]
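+ # Each triangle is counted once at each of its three vertices, so the
+ # per-node counts sum to 3 times the triangle count; K5 has C(5, 3) = 10
+ # triangles.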
+ assert sum(nx.triangles(G).values()) / 3 == 10
+ assert nx.triangles(G, 1) == 6
+ G.remove_edge(1, 2)
+ assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5]
+ assert nx.triangles(G, 1) == 3
+ G.add_edge(3, 3) # ignore self-edges
+ assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5]
+ assert nx.triangles(G, 3) == 5
+
+
+class TestDirectedClustering:
+ def test_clustering(self):
+ G = nx.DiGraph()
+ assert list(nx.clustering(G).values()) == []
+ assert nx.clustering(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(10, create_using=nx.DiGraph())
+ assert list(nx.clustering(G).values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.clustering(G) == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+ assert nx.clustering(G, 0) == 0
+
+ def test_k5(self):
+ G = nx.complete_graph(5, create_using=nx.DiGraph())
+ assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1]
+ assert nx.average_clustering(G) == 1
+ G.remove_edge(1, 2)
+ assert list(nx.clustering(G).values()) == [
+ 11 / 12,
+ 1,
+ 1,
+ 11 / 12,
+ 11 / 12,
+ ]
+ assert nx.clustering(G, [1, 4]) == {1: 1, 4: 11 / 12}
+ G.remove_edge(2, 1)
+ assert list(nx.clustering(G).values()) == [
+ 5 / 6,
+ 1,
+ 1,
+ 5 / 6,
+ 5 / 6,
+ ]
+ assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337}
+ assert nx.clustering(G, 4) == 5 / 6
+
+ def test_triangle_and_edge(self):
+ G = nx.cycle_graph(3, create_using=nx.DiGraph())
+ G.add_edge(0, 4)
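+ # Directed clustering (Fagiolo 2007): node 0 has total degree 3 (out to 1
+ # and 4, in from 2), no reciprocal edges and one directed triangle, so
+ # c(0) = 1 / (3 * 2) = 1/6.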
+ assert nx.clustering(G)[0] == 1 / 6
+
+
+class TestDirectedWeightedClustering:
+ @classmethod
+ def setup_class(cls):
+ global np
+ np = pytest.importorskip("numpy")
+
+ def test_clustering(self):
+ G = nx.DiGraph()
+ assert list(nx.clustering(G, weight="weight").values()) == []
+ assert nx.clustering(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(10, create_using=nx.DiGraph())
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.clustering(G, weight="weight") == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+
+ def test_k5(self):
+ G = nx.complete_graph(5, create_using=nx.DiGraph())
+ assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1]
+ assert nx.average_clustering(G, weight="weight") == 1
+ G.remove_edge(1, 2)
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 11 / 12,
+ 1,
+ 1,
+ 11 / 12,
+ 11 / 12,
+ ]
+ assert nx.clustering(G, [1, 4], weight="weight") == {1: 1, 4: 11 / 12}
+ G.remove_edge(2, 1)
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 5 / 6,
+ 1,
+ 1,
+ 5 / 6,
+ 5 / 6,
+ ]
+ assert nx.clustering(G, [1, 4], weight="weight") == {
+ 1: 1,
+ 4: 0.83333333333333337,
+ }
+
+ def test_triangle_and_edge(self):
+ G = nx.cycle_graph(3, create_using=nx.DiGraph())
+ G.add_edge(0, 4, weight=2)
+ assert nx.clustering(G)[0] == 1 / 6
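+ # Weighted clustering rescales weights by the maximum weight (2 here), so
+ # the lone triangle contributes (1/2 * 1/2 * 1/2) ** (1/3) = 1/2 and the
+ # coefficient halves from 1/6 to 1/12.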
+ # Relaxed comparisons to allow graphblas-algorithms to pass tests
+ np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 12)
+ np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 12)
+
+
+class TestWeightedClustering:
+ @classmethod
+ def setup_class(cls):
+ global np
+ np = pytest.importorskip("numpy")
+
+ def test_clustering(self):
+ G = nx.Graph()
+ assert list(nx.clustering(G, weight="weight").values()) == []
+ assert nx.clustering(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.clustering(G, weight="weight") == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.clustering(G, 1) == 0
+ assert list(nx.clustering(G, [1, 2], weight="weight").values()) == [0, 0]
+ assert nx.clustering(G, 1, weight="weight") == 0
+ assert nx.clustering(G, [1, 2], weight="weight") == {1: 0, 2: 0}
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1]
+ assert nx.average_clustering(G, weight="weight") == 1
+ G.remove_edge(1, 2)
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 5 / 6,
+ 1,
+ 1,
+ 5 / 6,
+ 5 / 6,
+ ]
+ assert nx.clustering(G, [1, 4], weight="weight") == {
+ 1: 1,
+ 4: 0.83333333333333337,
+ }
+
+ def test_triangle_and_edge(self):
+ G = nx.cycle_graph(3)
+ G.add_edge(0, 4, weight=2)
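+ # With weights, the geometric-mean definition rescales weights by the
+ # maximum weight (2 here): the triangle contributes
+ # (1/2 * 1/2 * 1/2) ** (1/3) = 1/2, halving node 0's coefficient from
+ # 1/3 to 1/6.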
+ assert nx.clustering(G)[0] == 1 / 3
+ np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 6)
+ np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 6)
+
+ def test_triangle_and_signed_edge(self):
+ G = nx.cycle_graph(3)
+ G.add_edge(0, 1, weight=-1)
+ G.add_edge(3, 0, weight=0)
+ assert nx.clustering(G)[0] == 1 / 3
+ assert nx.clustering(G, weight="weight")[0] == -1 / 3
+
+
+class TestClustering:
+ @classmethod
+ def setup_class(cls):
+ pytest.importorskip("numpy")
+
+ def test_clustering(self):
+ G = nx.Graph()
+ assert list(nx.clustering(G).values()) == []
+ assert nx.clustering(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert list(nx.clustering(G).values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.clustering(G) == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert list(nx.clustering(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0]
+ assert nx.clustering(G, 1) == 0
+ assert list(nx.clustering(G, [1, 2]).values()) == [0, 0]
+ assert nx.clustering(G, 1) == 0
+ assert nx.clustering(G, [1, 2]) == {1: 0, 2: 0}
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1]
+ assert nx.average_clustering(G) == 1
+ G.remove_edge(1, 2)
+ assert list(nx.clustering(G).values()) == [
+ 5 / 6,
+ 1,
+ 1,
+ 5 / 6,
+ 5 / 6,
+ ]
+ assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337}
+
+ def test_k5_signed(self):
+ G = nx.complete_graph(5)
+ assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1]
+ assert nx.average_clustering(G) == 1
+ G.remove_edge(1, 2)
+ G.add_edge(0, 1, weight=-1)
+ assert list(nx.clustering(G, weight="weight").values()) == [
+ 1 / 6,
+ -1 / 3,
+ 1,
+ 3 / 6,
+ 3 / 6,
+ ]
+
+
+class TestTransitivity:
+ def test_transitivity(self):
+ G = nx.Graph()
+ assert nx.transitivity(G) == 0
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert nx.transitivity(G) == 0
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert nx.transitivity(G) == 0
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert nx.transitivity(G) == 1
+ G.remove_edge(1, 2)
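+ # Removing (1, 2) destroys the 3 triangles containing both endpoints,
+ # leaving 7 of K5's 10; with sum(C(deg, 2)) = 2*3 + 3*6 = 24 connected
+ # triples, transitivity = 3 * 7 / 24 = 0.875.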
+ assert nx.transitivity(G) == 0.875
+
+
+class TestSquareClustering:
+ def test_clustering(self):
+ G = nx.Graph()
+ assert list(nx.square_clustering(G).values()) == []
+ assert nx.square_clustering(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(10)
+ assert list(nx.square_clustering(G).values()) == [
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ ]
+ assert nx.square_clustering(G) == {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ }
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert list(nx.square_clustering(G).values()) == [
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ 1 / 3,
+ ]
+ assert list(nx.square_clustering(G, [1, 2]).values()) == [1 / 3, 1 / 3]
+ assert nx.square_clustering(G, [1])[1] == 1 / 3
+ assert nx.square_clustering(G, 1) == 1 / 3
+ assert nx.square_clustering(G, [1, 2]) == {1: 1 / 3, 2: 1 / 3}
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1]
+
+ def test_bipartite_k5(self):
+ G = nx.complete_bipartite_graph(5, 5)
+ assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ def test_lind_square_clustering(self):
+ """Test C4 for figure 1 Lind et al (2005)"""
+ G = nx.Graph(
+ [
+ (1, 2),
+ (1, 3),
+ (1, 6),
+ (1, 7),
+ (2, 4),
+ (2, 5),
+ (3, 4),
+ (3, 5),
+ (6, 7),
+ (7, 8),
+ (6, 8),
+ (7, 9),
+ (7, 10),
+ (6, 11),
+ (6, 12),
+ (2, 13),
+ (2, 14),
+ (3, 15),
+ (3, 16),
+ ]
+ )
+ G1 = G.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16])
+ G2 = G.subgraph([1, 6, 7, 8, 9, 10, 11, 12])
+ assert nx.square_clustering(G, [1])[1] == 3 / 43
+ assert nx.square_clustering(G1, [1])[1] == 2 / 6
+ assert nx.square_clustering(G2, [1])[1] == 1 / 5
+
+ def test_peng_square_clustering(self):
+ """Test eq2 for figure 1 Peng et al (2008)"""
+ G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4), (3, 5), (3, 6)])
+ assert nx.square_clustering(G, [1])[1] == 1 / 3
+
+ def test_self_loops_square_clustering(self):
+ G = nx.path_graph(5)
+ assert nx.square_clustering(G) == {0: 0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0}
+ G.add_edges_from([(0, 0), (1, 1), (2, 2)])
+ assert nx.square_clustering(G) == {0: 1, 1: 0.5, 2: 0.2, 3: 0.0, 4: 0}
+
+
+class TestAverageClustering:
+ @classmethod
+ def setup_class(cls):
+ pytest.importorskip("numpy")
+
+ def test_empty(self):
+ G = nx.Graph()
+ with pytest.raises(ZeroDivisionError):
+ nx.average_clustering(G)
+
+ def test_average_clustering(self):
+ G = nx.cycle_graph(3)
+ G.add_edge(2, 3)
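+ # Per-node coefficients: nodes 0 and 1 have their two neighbours adjacent
+ # (1 each), node 2 has one adjacent pair out of three (1/3), and the
+ # degree-1 node 3 contributes 0.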
+ assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 4
+ assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 4
+ assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 3
+ assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 3
+ assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 3
+ assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 2
+
+ def test_average_clustering_signed(self):
+ G = nx.cycle_graph(3)
+ G.add_edge(2, 3)
+ G.add_edge(0, 1, weight=-1)
+ assert nx.average_clustering(G, weight="weight") == (-1 - 1 - 1 / 3) / 4
+ assert (
+ nx.average_clustering(G, weight="weight", count_zeros=True)
+ == (-1 - 1 - 1 / 3) / 4
+ )
+ assert (
+ nx.average_clustering(G, weight="weight", count_zeros=False)
+ == (-1 - 1 - 1 / 3) / 3
+ )
+
+
+class TestDirectedAverageClustering:
+ @classmethod
+ def setup_class(cls):
+ pytest.importorskip("numpy")
+
+ def test_empty(self):
+ G = nx.DiGraph()
+ with pytest.raises(ZeroDivisionError):
+ nx.average_clustering(G)
+
+ def test_average_clustering(self):
+ G = nx.cycle_graph(3, create_using=nx.DiGraph())
+ G.add_edge(2, 3)
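+ # Only one orientation of the triangle exists, so each directed
+ # coefficient is half its undirected counterpart (1/2, 1/2, 1/6 and 0 for
+ # node 3), which is why the denominators below double to 8 and 6.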
+ assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 8
+ assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 8
+ assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 6
+ assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 6
+ assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 6
+ assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 4
+
+
+class TestGeneralizedDegree:
+ def test_generalized_degree(self):
+ G = nx.Graph()
+ assert nx.generalized_degree(G) == {}
+
+ def test_path(self):
+ G = nx.path_graph(5)
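+ # generalized_degree(G, v) returns a Counter keyed by the number of
+ # triangles each edge incident to v lies in; no path edge lies in a
+ # triangle, so node 0 has one such edge and node 1 has two.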
+ assert nx.generalized_degree(G, 0) == {0: 1}
+ assert nx.generalized_degree(G, 1) == {0: 2}
+
+ def test_cubical(self):
+ G = nx.cubical_graph()
+ assert nx.generalized_degree(G, 0) == {0: 3}
+
+ def test_k5(self):
+ G = nx.complete_graph(5)
+ assert nx.generalized_degree(G, 0) == {3: 4}
+ G.remove_edge(0, 1)
+ assert nx.generalized_degree(G, 0) == {2: 3}
+ assert nx.generalized_degree(G, [1, 2]) == {1: {2: 3}, 2: {2: 2, 3: 2}}
+ assert nx.generalized_degree(G) == {
+ 0: {2: 3},
+ 1: {2: 3},
+ 2: {2: 2, 3: 2},
+ 3: {2: 2, 3: 2},
+ 4: {2: 2, 3: 2},
+ }
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_distance_regular.py b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_distance_regular.py
new file mode 100644
index 0000000000000000000000000000000000000000..545fb6dee6a915230971cf4b5a141e47adc2cc15
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_distance_regular.py
@@ -0,0 +1,85 @@
+import pytest
+
+import networkx as nx
+from networkx import is_strongly_regular
+
+
+@pytest.mark.parametrize(
+ "f", (nx.is_distance_regular, nx.intersection_array, nx.is_strongly_regular)
+)
+@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph))
+def test_raises_on_directed_and_multigraphs(f, graph_constructor):
+ G = graph_constructor([(0, 1), (1, 2)])
+ with pytest.raises(nx.NetworkXNotImplemented):
+ f(G)
+
+
+class TestDistanceRegular:
+ def test_is_distance_regular(self):
+ assert nx.is_distance_regular(nx.icosahedral_graph())
+ assert nx.is_distance_regular(nx.petersen_graph())
+ assert nx.is_distance_regular(nx.cubical_graph())
+ assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3))
+ assert nx.is_distance_regular(nx.tetrahedral_graph())
+ assert nx.is_distance_regular(nx.dodecahedral_graph())
+ assert nx.is_distance_regular(nx.pappus_graph())
+ assert nx.is_distance_regular(nx.heawood_graph())
+ assert nx.is_distance_regular(nx.cycle_graph(3))
+ # not distance-regular
+ assert not nx.is_distance_regular(nx.path_graph(4))
+
+ def test_not_connected(self):
+ G = nx.cycle_graph(4)
+ nx.add_cycle(G, [5, 6, 7])
+ assert not nx.is_distance_regular(G)
+
+ def test_global_parameters(self):
+ b, c = nx.intersection_array(nx.cycle_graph(5))
+ g = nx.global_parameters(b, c)
+ assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)]
+ b, c = nx.intersection_array(nx.cycle_graph(3))
+ g = nx.global_parameters(b, c)
+ assert list(g) == [(0, 0, 2), (1, 1, 0)]
+
+ def test_intersection_array(self):
+ b, c = nx.intersection_array(nx.cycle_graph(5))
+ assert b == [2, 1]
+ assert c == [1, 1]
+ b, c = nx.intersection_array(nx.dodecahedral_graph())
+ assert b == [3, 2, 1, 1, 1]
+ assert c == [1, 1, 1, 2, 3]
+ b, c = nx.intersection_array(nx.icosahedral_graph())
+ assert b == [5, 2, 1]
+ assert c == [1, 2, 5]
+
+
+@pytest.mark.parametrize("f", (nx.is_distance_regular, nx.is_strongly_regular))
+def test_empty_graph_raises(f):
+ G = nx.Graph()
+ with pytest.raises(nx.NetworkXPointlessConcept, match="Graph has no nodes"):
+ f(G)
+
+
+class TestStronglyRegular:
+ """Unit tests for the :func:`~networkx.is_strongly_regular`
+ function.
+
+ """
+
+ def test_cycle_graph(self):
+ """Tests that the cycle graph on five vertices is strongly
+ regular.
+
+ """
+ G = nx.cycle_graph(5)
+ assert is_strongly_regular(G)
+
+ def test_petersen_graph(self):
+ """Tests that the Petersen graph is strongly regular."""
+ G = nx.petersen_graph()
+ assert is_strongly_regular(G)
+
+ def test_path_graph(self):
+ """Tests that the path graph is not strongly regular."""
+ G = nx.path_graph(4)
+ assert not is_strongly_regular(G)
diff --git a/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_matching.py b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..37853e3896c0fd6bcac1f46524a844ae2e2fb518
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/networkx/algorithms/tests/test_matching.py
@@ -0,0 +1,605 @@
+import math
+from itertools import permutations
+
+from pytest import raises
+
+import networkx as nx
+from networkx.algorithms.matching import matching_dict_to_set
+from networkx.utils import edges_equal
+
+
+class TestMaxWeightMatching:
+ """Unit tests for the
+ :func:`~networkx.algorithms.matching.max_weight_matching` function.
+
+ """
+
+ def test_trivial1(self):
+ """Empty graph"""
+ G = nx.Graph()
+ assert nx.max_weight_matching(G) == set()
+ assert nx.min_weight_matching(G) == set()
+
+ def test_selfloop(self):
+ G = nx.Graph()
+ G.add_edge(0, 0, weight=100)
+ assert nx.max_weight_matching(G) == set()
+ assert nx.min_weight_matching(G) == set()
+
+ def test_single_edge(self):
+ G = nx.Graph()
+ G.add_edge(0, 1)
+ assert edges_equal(
+ nx.max_weight_matching(G), matching_dict_to_set({0: 1, 1: 0})
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G), matching_dict_to_set({0: 1, 1: 0})
+ )
+
+ def test_two_path(self):
+ G = nx.Graph()
+ G.add_edge("one", "two", weight=10)
+ G.add_edge("two", "three", weight=11)
+ assert edges_equal(
+ nx.max_weight_matching(G),
+ matching_dict_to_set({"three": "two", "two": "three"}),
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G),
+ matching_dict_to_set({"one": "two", "two": "one"}),
+ )
+
+ def test_path(self):
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=5)
+ G.add_edge(2, 3, weight=11)
+ G.add_edge(3, 4, weight=5)
+ assert edges_equal(
+ nx.max_weight_matching(G), matching_dict_to_set({2: 3, 3: 2})
+ )
+ assert edges_equal(
+ nx.max_weight_matching(G, 1), matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3})
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4})
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G, 1), matching_dict_to_set({1: 2, 3: 4})
+ )
+
+ def test_square(self):
+ G = nx.Graph()
+ G.add_edge(1, 4, weight=2)
+ G.add_edge(2, 3, weight=2)
+ G.add_edge(1, 2, weight=1)
+ G.add_edge(3, 4, weight=4)
+ assert edges_equal(
+ nx.max_weight_matching(G), matching_dict_to_set({1: 2, 3: 4})
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3})
+ )
+
+ def test_edge_attribute_name(self):
+ G = nx.Graph()
+ G.add_edge("one", "two", weight=10, abcd=11)
+ G.add_edge("two", "three", weight=11, abcd=10)
+ assert edges_equal(
+ nx.max_weight_matching(G, weight="abcd"),
+ matching_dict_to_set({"one": "two", "two": "one"}),
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G, weight="abcd"),
+ matching_dict_to_set({"three": "two"}),
+ )
+
+ def test_floating_point_weights(self):
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=math.pi)
+ G.add_edge(2, 3, weight=math.exp(1))
+ G.add_edge(1, 3, weight=3.0)
+ G.add_edge(1, 4, weight=math.sqrt(2.0))
+ assert edges_equal(
+ nx.max_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1})
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1})
+ )
+
+ def test_negative_weights(self):
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=2)
+ G.add_edge(1, 3, weight=-2)
+ G.add_edge(2, 3, weight=1)
+ G.add_edge(2, 4, weight=-1)
+ G.add_edge(3, 4, weight=-6)
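+ # Unconstrained, the single heavy edge (1, 2) is optimal; with
+ # maxcardinality=True a perfect matching is required, and
+ # {(1, 3), (2, 4)} (total -3) beats {(1, 2), (3, 4)} (total -4).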
+ assert edges_equal(
+ nx.max_weight_matching(G), matching_dict_to_set({1: 2, 2: 1})
+ )
+ assert edges_equal(
+ nx.max_weight_matching(G, maxcardinality=True),
+ matching_dict_to_set({1: 3, 2: 4, 3: 1, 4: 2}),
+ )
+ assert edges_equal(
+ nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4})
+ )
+
+ def test_s_blossom(self):
+ """Create S-blossom and use it for augmentation:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from([(1, 2, 8), (1, 3, 9), (2, 3, 10), (3, 4, 7)])
+ answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ G.add_weighted_edges_from([(1, 6, 5), (4, 5, 6)])
+ answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_s_t_blossom(self):
+ """Create S-blossom, relabel as T-blossom, use for augmentation:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [(1, 2, 9), (1, 3, 8), (2, 3, 10), (1, 4, 5), (4, 5, 4), (1, 6, 3)]
+ )
+ answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ G.add_edge(4, 5, weight=3)
+ G.add_edge(1, 6, weight=4)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ G.remove_edge(1, 6)
+ G.add_edge(3, 6, weight=4)
+ answer = matching_dict_to_set({1: 2, 2: 1, 3: 6, 4: 5, 5: 4, 6: 3})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nested_s_blossom(self):
+ """Create nested S-blossom, use for augmentation:"""
+
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 9),
+ (1, 3, 9),
+ (2, 3, 10),
+ (2, 4, 8),
+ (3, 5, 8),
+ (4, 5, 10),
+ (5, 6, 6),
+ ]
+ )
+ dict_format = {1: 3, 2: 4, 3: 1, 4: 2, 5: 6, 6: 5}
+ expected = {frozenset(e) for e in matching_dict_to_set(dict_format)}
+ answer = {frozenset(e) for e in nx.max_weight_matching(G)}
+ assert answer == expected
+ answer = {frozenset(e) for e in nx.min_weight_matching(G)}
+ assert answer == expected
+
+ def test_nested_s_blossom_relabel(self):
+ """Create S-blossom, relabel as S, include in nested S-blossom:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 10),
+ (1, 7, 10),
+ (2, 3, 12),
+ (3, 4, 20),
+ (3, 5, 20),
+ (4, 5, 25),
+ (5, 6, 10),
+ (6, 7, 10),
+ (7, 8, 8),
+ ]
+ )
+ answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3, 5: 6, 6: 5, 7: 8, 8: 7})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nested_s_blossom_expand(self):
+ """Create nested S-blossom, augment, expand recursively:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 8),
+ (1, 3, 8),
+ (2, 3, 10),
+ (2, 4, 12),
+ (3, 5, 12),
+ (4, 5, 14),
+ (4, 6, 12),
+ (5, 7, 12),
+ (6, 7, 14),
+ (7, 8, 12),
+ ]
+ )
+ answer = matching_dict_to_set({1: 2, 2: 1, 3: 5, 4: 6, 5: 3, 6: 4, 7: 8, 8: 7})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_s_blossom_relabel_expand(self):
+ """Create S-blossom, relabel as T, expand:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 23),
+ (1, 5, 22),
+ (1, 6, 15),
+ (2, 3, 25),
+ (3, 4, 22),
+ (4, 5, 25),
+ (4, 8, 14),
+ (5, 7, 13),
+ ]
+ )
+ answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nested_s_blossom_relabel_expand(self):
+ """Create nested S-blossom, relabel as T, expand:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 19),
+ (1, 3, 20),
+ (1, 8, 8),
+ (2, 3, 25),
+ (2, 4, 18),
+ (3, 5, 18),
+ (4, 5, 13),
+ (4, 7, 7),
+ (5, 6, 7),
+ ]
+ )
+ answer = matching_dict_to_set({1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1})
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nasty_blossom1(self):
+ """Create blossom, relabel as T in more than one way, expand,
+ augment:
+ """
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 45),
+ (1, 5, 45),
+ (2, 3, 50),
+ (3, 4, 45),
+ (4, 5, 50),
+ (1, 6, 30),
+ (3, 9, 35),
+ (4, 8, 35),
+ (5, 7, 26),
+ (9, 10, 5),
+ ]
+ )
+ ansdict = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+ answer = matching_dict_to_set(ansdict)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nasty_blossom2(self):
+ """Again but slightly different:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 45),
+ (1, 5, 45),
+ (2, 3, 50),
+ (3, 4, 45),
+ (4, 5, 50),
+ (1, 6, 30),
+ (3, 9, 35),
+ (4, 8, 26),
+ (5, 7, 40),
+ (9, 10, 5),
+ ]
+ )
+ ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+ answer = matching_dict_to_set(ans)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nasty_blossom_least_slack(self):
+ """Create blossom, relabel as T, expand such that a new
+ least-slack S-to-free edge is produced, augment:
+ """
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 45),
+ (1, 5, 45),
+ (2, 3, 50),
+ (3, 4, 45),
+ (4, 5, 50),
+ (1, 6, 30),
+ (3, 9, 35),
+ (4, 8, 28),
+ (5, 7, 26),
+ (9, 10, 5),
+ ]
+ )
+ ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+ answer = matching_dict_to_set(ans)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nasty_blossom_augmenting(self):
+ """Create nested blossom, relabel as T in more than one way"""
+ # expand outer blossom such that inner blossom ends up on an
+ # augmenting path:
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 45),
+ (1, 7, 45),
+ (2, 3, 50),
+ (3, 4, 45),
+ (4, 5, 95),
+ (4, 6, 94),
+ (5, 6, 94),
+ (6, 7, 50),
+ (1, 8, 30),
+ (3, 11, 35),
+ (5, 9, 36),
+ (7, 10, 26),
+ (11, 12, 5),
+ ]
+ )
+ ans = {
+ 1: 8,
+ 2: 3,
+ 3: 2,
+ 4: 6,
+ 5: 9,
+ 6: 4,
+ 7: 10,
+ 8: 1,
+ 9: 5,
+ 10: 7,
+ 11: 12,
+ 12: 11,
+ }
+ answer = matching_dict_to_set(ans)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_nasty_blossom_expand_recursively(self):
+ """Create nested S-blossom, relabel as S, expand recursively:"""
+ G = nx.Graph()
+ G.add_weighted_edges_from(
+ [
+ (1, 2, 40),
+ (1, 3, 40),
+ (2, 3, 60),
+ (2, 4, 55),
+ (3, 5, 55),
+ (4, 5, 50),
+ (1, 8, 15),
+ (5, 7, 30),
+ (7, 6, 10),
+ (8, 10, 10),
+ (4, 9, 30),
+ ]
+ )
+ ans = {1: 2, 2: 1, 3: 5, 4: 9, 5: 3, 6: 7, 7: 6, 8: 10, 9: 4, 10: 8}
+ answer = matching_dict_to_set(ans)
+ assert edges_equal(nx.max_weight_matching(G), answer)
+ assert edges_equal(nx.min_weight_matching(G), answer)
+
+ def test_wrong_graph_type(self):
+ error = nx.NetworkXNotImplemented
+ raises(error, nx.max_weight_matching, nx.MultiGraph())
+ raises(error, nx.max_weight_matching, nx.MultiDiGraph())
+ raises(error, nx.max_weight_matching, nx.DiGraph())
+ raises(error, nx.min_weight_matching, nx.DiGraph())
+
+
+class TestIsMatching:
+ """Unit tests for the
+ :func:`~networkx.algorithms.matching.is_matching` function.
+
+ """
+
+ def test_dict(self):
+ G = nx.path_graph(4)
+ assert nx.is_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
+
+ def test_empty_matching(self):
+ G = nx.path_graph(4)
+ assert nx.is_matching(G, set())
+
+ def test_single_edge(self):
+ G = nx.path_graph(4)
+ assert nx.is_matching(G, {(1, 2)})
+
+ def test_edge_order(self):
+ G = nx.path_graph(4)
+ assert nx.is_matching(G, {(0, 1), (2, 3)})
+ assert nx.is_matching(G, {(1, 0), (2, 3)})
+ assert nx.is_matching(G, {(0, 1), (3, 2)})
+ assert nx.is_matching(G, {(1, 0), (3, 2)})
+
+ def test_valid_matching(self):
+ G = nx.path_graph(4)
+ assert nx.is_matching(G, {(0, 1), (2, 3)})
+
+ def test_invalid_input(self):
+ error = nx.NetworkXError
+ G = nx.path_graph(4)
+ # edge to node not in G
+ raises(error, nx.is_matching, G, {(0, 5), (2, 3)})
+ # edge not a 2-tuple
+ raises(error, nx.is_matching, G, {(0, 1, 2), (2, 3)})
+ raises(error, nx.is_matching, G, {(0,), (2, 3)})
+
+ def test_selfloops(self):
+ error = nx.NetworkXError
+ G = nx.path_graph(4)
+ # selfloop for node not in G
+ raises(error, nx.is_matching, G, {(5, 5), (2, 3)})
+ # selfloop edge not in G
+ assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)})
+ # selfloop edge in G
+ G.add_edge(0, 0)
+ assert not nx.is_matching(G, {(0, 0), (1, 2)})
+
+ def test_invalid_matching(self):
+ G = nx.path_graph(4)
+ assert not nx.is_matching(G, {(0, 1), (1, 2), (2, 3)})
+
+ def test_invalid_edge(self):
+ G = nx.path_graph(4)
+ assert not nx.is_matching(G, {(0, 3), (1, 2)})
+ raises(nx.NetworkXError, nx.is_matching, G, {(0, 55)})
+
+ G = nx.DiGraph(G.edges)
+ assert nx.is_matching(G, {(0, 1)})
+ assert not nx.is_matching(G, {(1, 0)})
+
+
+class TestIsMaximalMatching:
+ """Unit tests for the
+ :func:`~networkx.algorithms.matching.is_maximal_matching` function.
+
+ """
+
+ def test_dict(self):
+ G = nx.path_graph(4)
+ assert nx.is_maximal_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
+
+ def test_invalid_input(self):
+ error = nx.NetworkXError
+ G = nx.path_graph(4)
+ # edge to node not in G
+ raises(error, nx.is_maximal_matching, G, {(0, 5)})
+ raises(error, nx.is_maximal_matching, G, {(5, 0)})
+ # edge not a 2-tuple
+ raises(error, nx.is_maximal_matching, G, {(0, 1, 2), (2, 3)})
+ raises(error, nx.is_maximal_matching, G, {(0,), (2, 3)})
+
+ def test_valid(self):
+ G = nx.path_graph(4)
+ assert nx.is_maximal_matching(G, {(0, 1), (2, 3)})
+
+ def test_not_matching(self):
+ G = nx.path_graph(4)
+ assert not nx.is_maximal_matching(G, {(0, 1), (1, 2), (2, 3)})
+ assert not nx.is_maximal_matching(G, {(0, 3)})
+ G.add_edge(0, 0)
+ assert not nx.is_maximal_matching(G, {(0, 0)})
+
+ def test_not_maximal(self):
+ G = nx.path_graph(4)
+ assert not nx.is_maximal_matching(G, {(0, 1)})
+
+
+class TestIsPerfectMatching:
+ """Unit tests for the
+ :func:`~networkx.algorithms.matching.is_perfect_matching` function.
+
+ """
+
+ def test_dict(self):
+ G = nx.path_graph(4)
+ assert nx.is_perfect_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
+
+ def test_valid(self):
+ G = nx.path_graph(4)
+ assert nx.is_perfect_matching(G, {(0, 1), (2, 3)})
+
+ def test_valid_not_path(self):
+ G = nx.cycle_graph(4)
+ G.add_edge(0, 4)
+ G.add_edge(1, 4)
+ G.add_edge(5, 2)
+
+ assert nx.is_perfect_matching(G, {(1, 4), (0, 3), (5, 2)})
+
+ def test_invalid_input(self):
+ error = nx.NetworkXError
+ G = nx.path_graph(4)
+ # edge to node not in G
+ raises(error, nx.is_perfect_matching, G, {(0, 5)})
+ raises(error, nx.is_perfect_matching, G, {(5, 0)})
+ # edge not a 2-tuple
+ raises(error, nx.is_perfect_matching, G, {(0, 1, 2), (2, 3)})
+ raises(error, nx.is_perfect_matching, G, {(0,), (2, 3)})
+
+ def test_selfloops(self):
+ error = nx.NetworkXError
+ G = nx.path_graph(4)
+ # selfloop for node not in G
+ raises(error, nx.is_perfect_matching, G, {(5, 5), (2, 3)})
+ # selfloop edge not in G
+ assert not nx.is_perfect_matching(G, {(0, 0), (1, 2), (2, 3)})
+ # selfloop edge in G
+ G.add_edge(0, 0)
+ assert not nx.is_perfect_matching(G, {(0, 0), (1, 2)})
+
+ def test_not_matching(self):
+ G = nx.path_graph(4)
+ assert not nx.is_perfect_matching(G, {(0, 3)})
+ assert not nx.is_perfect_matching(G, {(0, 1), (1, 2), (2, 3)})
+
+ def test_maximal_but_not_perfect(self):
+ G = nx.cycle_graph(4)
+ G.add_edge(0, 4)
+ G.add_edge(1, 4)
+
+ assert not nx.is_perfect_matching(G, {(1, 4), (0, 3)})
+
+
+class TestMaximalMatching:
+ """Unit tests for the
+ :func:`~networkx.algorithms.matching.maximal_matching`.
+
+ """
+
+ def test_valid_matching(self):
+ edges = [(1, 2), (1, 5), (2, 3), (2, 5), (3, 4), (3, 6), (5, 6)]
+ G = nx.Graph(edges)
+ matching = nx.maximal_matching(G)
+ assert nx.is_maximal_matching(G, matching)
+
+ def test_single_edge_matching(self):
+ # In the star graph, any maximal matching has just one edge.
+ G = nx.star_graph(5)
+ matching = nx.maximal_matching(G)
+ assert 1 == len(matching)
+ assert nx.is_maximal_matching(G, matching)
+
+ def test_self_loops(self):
+ # Create the path graph with two self-loops.
+ G = nx.path_graph(3)
+ G.add_edges_from([(0, 0), (1, 1)])
+ matching = nx.maximal_matching(G)
+ assert len(matching) == 1
+ # The matching should never include self-loops.
+ assert not any(u == v for u, v in matching)
+ assert nx.is_maximal_matching(G, matching)
+
+ def test_ordering(self):
+ """Tests that a maximal matching is computed correctly
+ regardless of the order in which nodes are added to the graph.
+
+ """
+ for nodes in permutations(range(3)):
+ G = nx.Graph()
+ G.add_nodes_from(nodes)
+ G.add_edges_from([(0, 1), (0, 2)])
+ matching = nx.maximal_matching(G)
+ assert len(matching) == 1
+ assert nx.is_maximal_matching(G, matching)
+
+ def test_wrong_graph_type(self):
+ error = nx.NetworkXNotImplemented
+ raises(error, nx.maximal_matching, nx.MultiGraph())
+ raises(error, nx.maximal_matching, nx.MultiDiGraph())
+ raises(error, nx.maximal_matching, nx.DiGraph())
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..353924be0e59b9ad7e6c22848c2189398481821d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright Jason R. Coombs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..8e323f4e92af7e3765878dc0d145359389cb41d1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA
@@ -0,0 +1,124 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 59.6.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+License: UNKNOWN
+Project-URL: Documentation, https://setuptools.pypa.io/
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=3.6
+License-File: LICENSE
+Provides-Extra: certs
+Provides-Extra: docs
+Requires-Dist: furo ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs'
+Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
+Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: sphinx-inline-tabs ; extra == 'docs'
+Requires-Dist: sphinxcontrib-towncrier ; extra == 'docs'
+Provides-Extra: ssl
+Provides-Extra: testing
+Requires-Dist: flake8-2020 ; extra == 'testing'
+Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing'
+Requires-Dist: jaraco.path (>=3.2.0) ; extra == 'testing'
+Requires-Dist: mock ; extra == 'testing'
+Requires-Dist: paver ; extra == 'testing'
+Requires-Dist: pip (>=19.1) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing'
+Requires-Dist: pytest-flake8 ; extra == 'testing'
+Requires-Dist: pytest-virtualenv (>=1.2.7) ; extra == 'testing'
+Requires-Dist: pytest-xdist ; extra == 'testing'
+Requires-Dist: pytest (>=6) ; extra == 'testing'
+Requires-Dist: sphinx ; extra == 'testing'
+Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing'
+Requires-Dist: wheel ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing'
+
+.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg
+ :align: center
+
+|
+
+.. image:: https://img.shields.io/pypi/v/setuptools.svg
+ :target: `PyPI link`_
+
+.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+ :target: `PyPI link`_
+
+.. _PyPI link: https://pypi.org/project/setuptools
+
+.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg
+ :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22
+ :alt: tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: Black
+
+.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
+ :target: https://setuptools.pypa.io
+
+.. image:: https://img.shields.io/badge/skeleton-2021-informational
+ :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
+ :target: https://codecov.io/gh/pypa/setuptools
+
+.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
+ :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
+
+See the `Installation Instructions
+`_ in the Python Packaging
+User's Guide for instructions on installing, upgrading, and uninstalling
+Setuptools.
+
+Questions and comments should be directed to the `distutils-sig
+mailing list `_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+`_.
+
+
+Code of Conduct
+===============
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and mailing lists is expected to follow the
+`PSF Code of Conduct `_.
+
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more `_.
+
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact `_.
+Tidelift will coordinate the fix and disclosure.
+
+
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..def46084e349fbb8f517ba0ecbc39747f3f761cc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD
@@ -0,0 +1,298 @@
+_distutils_hack/__init__.py,sha256=TCUx2qEhWNyruLzj4DOGZAQH39hm2fJ_wDd90olNOmo,3759
+_distutils_hack/__pycache__/__init__.cpython-310.pyc,,
+_distutils_hack/__pycache__/override.cpython-310.pyc,,
+_distutils_hack/override.py,sha256=Eu_s-NF6VIZ4Cqd0tbbA5wtWky2IZPNd8et6GLt1mzo,44
+distutils-precedence.pth,sha256=fqf_7z_ioRfuEsaO1lU2F_DX_S8FkCV8JcSElZo7c3M,152
+pkg_resources/__init__.py,sha256=uAnPq8FsTXHAEHFWK7UU9AhdNjE4o5Skfk8CyfbztO8,108573
+pkg_resources/__pycache__/__init__.cpython-310.pyc,,
+pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc,,
+pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc,,
+pkg_resources/_vendor/__pycache__/pyparsing.cpython-310.pyc,,
+pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701
+pkg_resources/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661
+pkg_resources/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc,,
+pkg_resources/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+pkg_resources/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378
+pkg_resources/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629
+pkg_resources/_vendor/packaging/markers.py,sha256=gFSKoBTb0sKDw1v_apJy15lPr0v2mEvuEkfooTtcWx4,8496
+pkg_resources/_vendor/packaging/requirements.py,sha256=uJ4cjwm3_nrfHJLCcGU9mT5aw8SXfw8v1aBUD7OFuVs,4706
+pkg_resources/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964
+pkg_resources/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710
+pkg_resources/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+pkg_resources/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+pkg_resources/extern/__init__.py,sha256=3PixaT9Tzzd4NoyV6CVhGd7S_9Z-U5yvMWAftZKvC6k,2362
+pkg_resources/extern/__pycache__/__init__.cpython-310.pyc,,
+pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc,,
+pkg_resources/tests/data/my-test-package-source/setup.py,sha256=Mrezl3nqxkYkjCYpIxmjhhg4AR8hgi4QZdEYmk-I7R8,104
+setuptools-59.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+setuptools-59.6.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050
+setuptools-59.6.0.dist-info/METADATA,sha256=wis8J-_8PwCf5xGTjZ520vMjGCF94516nC1ml1ebyX4,4963
+setuptools-59.6.0.dist-info/RECORD,,
+setuptools-59.6.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+setuptools-59.6.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+setuptools-59.6.0.dist-info/entry_points.txt,sha256=wpnhLrbtyk4hZ1qCCw48cCSxoQPzULMhIuaFqsB7GxQ,2636
+setuptools-59.6.0.dist-info/top_level.txt,sha256=1Euo4fJMWPMc6iG8BrvoHx4c65FnzA7Mv_p3en0BDds,48
+setuptools/__init__.py,sha256=l7ULo8jGk-4-8jbacmJ58cYpSRX4swS1ccbJaJVAGdM,7448
+setuptools/__pycache__/__init__.cpython-310.pyc,,
+setuptools/__pycache__/_deprecation_warning.cpython-310.pyc,,
+setuptools/__pycache__/_imp.cpython-310.pyc,,
+setuptools/__pycache__/archive_util.cpython-310.pyc,,
+setuptools/__pycache__/build_meta.cpython-310.pyc,,
+setuptools/__pycache__/config.cpython-310.pyc,,
+setuptools/__pycache__/dep_util.cpython-310.pyc,,
+setuptools/__pycache__/depends.cpython-310.pyc,,
+setuptools/__pycache__/dist.cpython-310.pyc,,
+setuptools/__pycache__/errors.cpython-310.pyc,,
+setuptools/__pycache__/extension.cpython-310.pyc,,
+setuptools/__pycache__/glob.cpython-310.pyc,,
+setuptools/__pycache__/installer.cpython-310.pyc,,
+setuptools/__pycache__/launch.cpython-310.pyc,,
+setuptools/__pycache__/monkey.cpython-310.pyc,,
+setuptools/__pycache__/msvc.cpython-310.pyc,,
+setuptools/__pycache__/namespaces.cpython-310.pyc,,
+setuptools/__pycache__/package_index.cpython-310.pyc,,
+setuptools/__pycache__/py34compat.cpython-310.pyc,,
+setuptools/__pycache__/sandbox.cpython-310.pyc,,
+setuptools/__pycache__/unicode_utils.cpython-310.pyc,,
+setuptools/__pycache__/version.cpython-310.pyc,,
+setuptools/__pycache__/wheel.cpython-310.pyc,,
+setuptools/__pycache__/windows_support.cpython-310.pyc,,
+setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218
+setuptools/_distutils/__init__.py,sha256=3YtkfadGoU57VMEQFk2TNyMZVud1kDkakWQLhWg2Fm8,536
+setuptools/_distutils/__pycache__/__init__.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/_msvccompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/archive_util.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/bcppcompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/ccompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/cmd.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/config.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/core.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/cygwinccompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/debug.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/dep_util.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/dir_util.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/dist.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/errors.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/extension.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/fancy_getopt.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/file_util.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/filelist.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/log.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/msvc9compiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/msvccompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/py35compat.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/py38compat.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/spawn.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/sysconfig.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/text_file.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/unixccompiler.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/util.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/version.cpython-310.pyc,,
+setuptools/_distutils/__pycache__/versionpredicate.cpython-310.pyc,,
+setuptools/_distutils/_msvccompiler.py,sha256=jR0JM5A1JMnZ6xMDicQzhXWgXTVXs1lWAeUexC1z198,20813
+setuptools/_distutils/archive_util.py,sha256=qW-uiGwYexTvK5e-iSel_31Dshx-CqTanNPK6snwf98,8572
+setuptools/_distutils/bcppcompiler.py,sha256=OJDVpCUmX6H8v_7lV1zifV1fcx92Cr2dhiUh6989UJI,14894
+setuptools/_distutils/ccompiler.py,sha256=YbernlpGZZqKnfzZSfJ814fINca8cicZiUlBjyUPyaM,47644
+setuptools/_distutils/cmd.py,sha256=eco6LAGUtobLuPafuhmgKgkwRRL_WY8KJ4YeDCHpcls,18079
+setuptools/_distutils/command/__init__.py,sha256=2TA-rlNDlzeI-csbWHXFjGD8uOYqALMfyWOhT49nC6g,799
+setuptools/_distutils/command/__pycache__/__init__.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/bdist.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/bdist_dumb.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/bdist_msi.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/bdist_rpm.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/bdist_wininst.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/build.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/build_clib.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/build_ext.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/build_py.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/build_scripts.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/check.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/clean.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/config.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install_data.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install_egg_info.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install_headers.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install_lib.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/install_scripts.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/py37compat.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/register.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/sdist.cpython-310.pyc,,
+setuptools/_distutils/command/__pycache__/upload.cpython-310.pyc,,
+setuptools/_distutils/command/bdist.py,sha256=2z4eudRl_n7m3lG9leL0IYqes4bsm8c0fxfZuiafjMg,5562
+setuptools/_distutils/command/bdist_dumb.py,sha256=BTur9jcIppyP7Piavjfsk7YjElqvxeYO2npUyPPOekc,4913
+setuptools/_distutils/command/bdist_msi.py,sha256=EVFQYN_X-ExeeP8gmdV9JcINsuUGsLJUz9afMU0Rt8c,35579
+setuptools/_distutils/command/bdist_rpm.py,sha256=gjOw22GhDSbcq0bdq25cTb-n6HWWm0bShLQad_mkJ4k,21537
+setuptools/_distutils/command/bdist_wininst.py,sha256=iGlaI-VfElHOneeczKHWnSN5a10-7IMcJaXuR1mdS3c,16030
+setuptools/_distutils/command/build.py,sha256=1AF-dxN_NlOEyoydBz19AwpeWYPSYCZvOLJSN_PdatY,5773
+setuptools/_distutils/command/build_clib.py,sha256=bgVTHh28eLQA2Gkw68amApd_j7qQBX4MTI-zTvAK_J4,8022
+setuptools/_distutils/command/build_ext.py,sha256=KgxpopuD6sqep0LsumMH15joWih0VdbnXpYm-ETNjoE,31612
+setuptools/_distutils/command/build_py.py,sha256=hXesMrH_epNj6K8SUtJdipgEis3EdICKeZ8VWe_ndck,16495
+setuptools/_distutils/command/build_scripts.py,sha256=urdn6wPxPMW5dLqpqFkZ8dqaFG1tf9TiAao6U9LCoEI,5963
+setuptools/_distutils/command/check.py,sha256=5qDtI75ccZg3sAItQWeaIu8y3FR314O4rr9Smz4HsEo,5637
+setuptools/_distutils/command/clean.py,sha256=2TCt47ru4hZZM0RfVfUYj5bbpicpGLP4Qhw5jBtvp9k,2776
+setuptools/_distutils/command/config.py,sha256=2aTjww3PwjMB8-ZibCe4P7B-qG1hM1gn_rJXYyxRz6c,13117
+setuptools/_distutils/command/install.py,sha256=zX_OITRItDnNAv0iVjXdFVitf3f63tHzK_mZ1sIxsuc,28970
+setuptools/_distutils/command/install_data.py,sha256=YhGOAwh3gJPqF7em5XA0rmpR42z1bLh80ooElzDyUvk,2822
+setuptools/_distutils/command/install_egg_info.py,sha256=WijZ7cHMAkNMMCwrZ--KoqV9M2RtLouU4-qSbiCwv70,2753
+setuptools/_distutils/command/install_headers.py,sha256=XQ6idkbIDfr1ljXCOznuVUMvOFpHBn6cK0Wz9gIM2b4,1298
+setuptools/_distutils/command/install_lib.py,sha256=9AofR-MO9lAtjwwuukCptepOaJEKMZW2VHiyR5hU7HA,8397
+setuptools/_distutils/command/install_scripts.py,sha256=_CLUeQwGJRcY2kik7azPMn5IdtDCrjWdUvZ1khlG6ck,2017
+setuptools/_distutils/command/py37compat.py,sha256=qzRhhvTihqx_PZZt2ZYECxh1X3Oj255VqatzelYFAKw,671
+setuptools/_distutils/command/register.py,sha256=2jaq9968rt2puRVDBx1HbNiXv27uOk8idE_4lPf_3VM,11712
+setuptools/_distutils/command/sdist.py,sha256=qotJjAOzyhJjq2-oDImjNFrOtaSneEFDJTB-sEk1wnU,19005
+setuptools/_distutils/command/upload.py,sha256=BLO1w7eSAqsCjCLXtf_CRVSjwF1WmyOByGVGNdcQ8oY,7597
+setuptools/_distutils/config.py,sha256=dtHgblx9JhfyrKx1-J7Jlxw_f7s8ZbPFQii2UWMTZpY,4827
+setuptools/_distutils/core.py,sha256=0v7Emh9y0AW9o4AEjfVMhDxKzTFWFxUQn46spFSL56g,9282
+setuptools/_distutils/cygwinccompiler.py,sha256=MhRmF3G0-5doB6XqCuNCvHIXcgUva-OulDwJRAjZzHY,17330
+setuptools/_distutils/debug.py,sha256=N6MrTAqK6l9SVk6tWweR108PM8Ol7qNlfyV-nHcLhsY,139
+setuptools/_distutils/dep_util.py,sha256=GuR9Iw_jzZRkyemJ5HX8rB_wRGxkIBcBm1qh54r7zhk,3491
+setuptools/_distutils/dir_util.py,sha256=UwhBOUTcV65GTwce4SPuTXR8Z8q3LYEcmttqcGb0bYo,7778
+setuptools/_distutils/dist.py,sha256=Biuf6ca8uiFfMScRFsYUKtb5neMPtxKxRtXn50_1f3U,50421
+setuptools/_distutils/errors.py,sha256=Yr6tKZGdzBoNi53vBtiq0UJ__X05CmxSdQJqOWaw6SY,3577
+setuptools/_distutils/extension.py,sha256=bTb3Q0CoevGKYv5dX1ls--Ln8tlB0-UEOsi9BwzlZ-s,10515
+setuptools/_distutils/fancy_getopt.py,sha256=OPxp2CxHi1Yp_d1D8JxW4Ueq9fC71tegQFaafh58GGU,17784
+setuptools/_distutils/file_util.py,sha256=0hUqfItN_x2DVihR0MHdA4KCMVCOO8VoByaFp_a6MDg,8148
+setuptools/_distutils/filelist.py,sha256=Z9f5hvepZnpniZ2IFmCnWIjdviWozs8sbARBhWajwoM,13407
+setuptools/_distutils/log.py,sha256=hWBmdUC2K927QcVv3REMW3HMPclxccPQngxLSuUXQl0,1969
+setuptools/_distutils/msvc9compiler.py,sha256=X623B92g0v8A3BEM9qpRf396AEd_hfjkfDUVTKu0hcE,30453
+setuptools/_distutils/msvccompiler.py,sha256=qruALeGRq8-CjtjE2tLQ8W26QnchcYedWzFme8AxZ4Q,23540
+setuptools/_distutils/py35compat.py,sha256=-sk1vBIsOgH-AobjIYbK_OEjdJF_54Ul_D1EiE9XM_c,455
+setuptools/_distutils/py38compat.py,sha256=II7ddBxOijC7uNN4z_46HYUjwYTJYMNiLJoGTormZm0,212
+setuptools/_distutils/spawn.py,sha256=4uE9k3VZWijxy7E_Rlcmh1MoamaPJ8rajdNBagKxjgU,3498
+setuptools/_distutils/sysconfig.py,sha256=k3fzINx3-qjge0udI6fC1UQSDPYpMGrxeSuV9cY4rmU,22151
+setuptools/_distutils/text_file.py,sha256=PsuAJeWdKJoLSV_6N6IpB5-0Pa84KzLUucJMFRazw3I,12483
+setuptools/_distutils/unixccompiler.py,sha256=u2Sfs6LRmqQux4nZW08GwDtoFMded6wYnkiaO2TvKC4,14538
+setuptools/_distutils/util.py,sha256=0v7B6nIsAXP11A7xqS6FC6lFAdaIqzxz_C-at4aMcgs,20655
+setuptools/_distutils/version.py,sha256=syRvPxuMQxnftpuIKeRE-2ELQ_ZMCwMJ-o8ie-lxdZo,13015
+setuptools/_distutils/versionpredicate.py,sha256=vx4ND3BtMgxFR9iZ4_t3WFa-NdIKxO8vtOd0twBppxc,5277
+setuptools/_imp.py,sha256=HmF91IbitRfsD5z-g4_wmcuH-RahyIONbPgiCOFgtzA,2392
+setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+setuptools/_vendor/__pycache__/__init__.cpython-310.pyc,,
+setuptools/_vendor/__pycache__/ordered_set.cpython-310.pyc,,
+setuptools/_vendor/__pycache__/pyparsing.cpython-310.pyc,,
+setuptools/_vendor/more_itertools/__init__.py,sha256=C7sXffHTXM3P-iaLPPfqfmDoxOflQMJLcM7ed9p3jak,82
+setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc,,
+setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc,,
+setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc,,
+setuptools/_vendor/more_itertools/more.py,sha256=DlZa8v6JihVwfQ5zHidOA-xDE0orcQIUyxVnCaUoDKE,117968
+setuptools/_vendor/more_itertools/recipes.py,sha256=UkNkrsZyqiwgLHANBTmvMhCvaNSvSNYhyOpz_Jc55DY,16256
+setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130
+setuptools/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661
+setuptools/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+setuptools/_vendor/packaging/__pycache__/__about__.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/markers.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/tags.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/utils.cpython-310.pyc,,
+setuptools/_vendor/packaging/__pycache__/version.cpython-310.pyc,,
+setuptools/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+setuptools/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378
+setuptools/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629
+setuptools/_vendor/packaging/markers.py,sha256=lihRgqpZjLM-JW-vxlLPqU3kmVe79g9vypy1kxmTRuQ,8493
+setuptools/_vendor/packaging/requirements.py,sha256=Opd0FjqgdEiWkzBLyo1oLU0Dj01uIFwTAnAJQrr6j2A,4700
+setuptools/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964
+setuptools/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710
+setuptools/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+setuptools/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+setuptools/archive_util.py,sha256=maJDbozRbDeSPw53VT0cb_IS3W0Ap73lJR8tX8RZDx0,7077
+setuptools/build_meta.py,sha256=hCU742vjgXHY6oKPYttBkie-n4DVNAJrUOgn0O_V3nc,10536
+setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752
+setuptools/cli-arm64.exe,sha256=o9amxowudZ98NvNWh_a2DRY8LhoIRqTAekxABqltiMc,137216
+setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/command/__init__.py,sha256=e-8TJOikUe3St0fw2b2p9u5EDdSxl5zHUBJJKifbcQ8,217
+setuptools/command/__pycache__/__init__.cpython-310.pyc,,
+setuptools/command/__pycache__/alias.cpython-310.pyc,,
+setuptools/command/__pycache__/bdist_egg.cpython-310.pyc,,
+setuptools/command/__pycache__/bdist_rpm.cpython-310.pyc,,
+setuptools/command/__pycache__/build_clib.cpython-310.pyc,,
+setuptools/command/__pycache__/build_ext.cpython-310.pyc,,
+setuptools/command/__pycache__/build_py.cpython-310.pyc,,
+setuptools/command/__pycache__/develop.cpython-310.pyc,,
+setuptools/command/__pycache__/dist_info.cpython-310.pyc,,
+setuptools/command/__pycache__/easy_install.cpython-310.pyc,,
+setuptools/command/__pycache__/egg_info.cpython-310.pyc,,
+setuptools/command/__pycache__/install.cpython-310.pyc,,
+setuptools/command/__pycache__/install_egg_info.cpython-310.pyc,,
+setuptools/command/__pycache__/install_lib.cpython-310.pyc,,
+setuptools/command/__pycache__/install_scripts.cpython-310.pyc,,
+setuptools/command/__pycache__/py36compat.cpython-310.pyc,,
+setuptools/command/__pycache__/register.cpython-310.pyc,,
+setuptools/command/__pycache__/rotate.cpython-310.pyc,,
+setuptools/command/__pycache__/saveopts.cpython-310.pyc,,
+setuptools/command/__pycache__/sdist.cpython-310.pyc,,
+setuptools/command/__pycache__/setopt.cpython-310.pyc,,
+setuptools/command/__pycache__/test.cpython-310.pyc,,
+setuptools/command/__pycache__/upload.cpython-310.pyc,,
+setuptools/command/__pycache__/upload_docs.cpython-310.pyc,,
+setuptools/command/alias.py,sha256=1sLQxZcNh6dDQpDmm4G7UGGTol83nY1NTPmNBbm2siI,2381
+setuptools/command/bdist_egg.py,sha256=-upiB6fFtm8cQSQj1LRDVpG1-T143DsXCvV0fh03u7U,16604
+setuptools/command/bdist_rpm.py,sha256=PxrgoHPNaw2Pw2qNjjHDPC-Ay_IaDbCqP3d_5N-cj2A,1182
+setuptools/command/build_clib.py,sha256=fWHSFGkk10VCddBWCszvNhowbG9Z9CZXVjQ2uSInoOs,4415
+setuptools/command/build_ext.py,sha256=SNK042HfB2ezlDQbSVRGFqI1IM5A4AsjU1wpV3fgskE,13212
+setuptools/command/build_py.py,sha256=c90V1nVPEtYkdye-xvo-B48V5RLvSgD8JBMfPtUbtYw,8751
+setuptools/command/develop.py,sha256=5_Ss7ENd1_B_jVMY1tF5UV_y1Xu6jbVzAPG8oKeluGA,7012
+setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960
+setuptools/command/easy_install.py,sha256=oXURojITuMmOQ2ZcOJ_IKkm5ahyoT5tnA89jZ70CTds,87973
+setuptools/command/egg_info.py,sha256=5rV9PH6Eeics9xkpzx-nsTBL54S1S-Bf0r1liCtYPVI,26134
+setuptools/command/install.py,sha256=UynjFBgRyyHrDZRVAmXrXG0vChJAMx-sxnOO3JoAzVo,4906
+setuptools/command/install_egg_info.py,sha256=4zq_Ad3jE-EffParuyDEnvxU6efB-Xhrzdr8aB6Ln_8,3195
+setuptools/command/install_lib.py,sha256=4zK0nihAAwMYIoOS0UOBLZKSOCBbXXPKsTraO_a8qmk,5036
+setuptools/command/install_scripts.py,sha256=o0jN_ex7yYYk8W5clymTFOXwkFMKzW9q_zd9Npcex7M,2593
+setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628
+setuptools/command/py36compat.py,sha256=7yLWzQj179Enx3pJ8V1cDDCzeLMFMd9XJXlK-iZTq5Y,4946
+setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468
+setuptools/command/rotate.py,sha256=SvsQPasezIojPjvMnfkqzh8P0U0tCj0daczF8uc3NQM,2128
+setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658
+setuptools/command/sdist.py,sha256=2onJidYBPFpUgcX6J4KjZX5ilwciHPRB8VkID5YVaL0,6413
+setuptools/command/setopt.py,sha256=okxhqD1NM1nQlbSVDCNv6P7Y7g680sc2r-tUW7wPH1Y,5086
+setuptools/command/test.py,sha256=qGY-Hx1RPCndlVh2rsrEs5479CgmxRsrEflVLr98jVA,8088
+setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462
+setuptools/command/upload_docs.py,sha256=ba5kOyedD_u62weinrxqqnvpuQvBIuamXehJG6tAvO0,7218
+setuptools/config.py,sha256=O-T_28163qkEeaX8bLgqJLuOLYur15cC2_xpA0RENfM,23153
+setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949
+setuptools/depends.py,sha256=QYQIadr5DwLxPzkErhNt5hmRhvGhWxoXZMRXCm_jcQ0,5499
+setuptools/dist.py,sha256=73utfl0NHQ_Xfp5m3-wlbo7YaA31S_dkleh5P3GTKws,43162
+setuptools/errors.py,sha256=t4Rm85eXm71Ti0-PO1gAQMRK3V7NN3x1tcbcw0-xGSI,1555
+setuptools/extension.py,sha256=NMM46XjNdVelWemc0x8CyVKA5Ks6Zm3xTWSA2SS6xZM,1684
+setuptools/extern/__init__.py,sha256=Hhf9W73WAitw9TdRJfDIb6YFjmK56CF61afds1Mg0HY,2407
+setuptools/extern/__pycache__/__init__.cpython-310.pyc,,
+setuptools/glob.py,sha256=1oZjbfjAHSXbgdhSuR6YGU8jKob9L8NtEmBYqcPTLYk,4873
+setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264
+setuptools/gui-arm64.exe,sha256=TEFnOKDi-mq3ZszxqbCoCXTnM_lhUWjdIqBpr6fVs40,137728
+setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/installer.py,sha256=s6DQfsoICBJxbUqbduhOJtl1oG0S4yegRCg3EAs0i3M,3824
+setuptools/launch.py,sha256=TyPT-Ic1T2EnYvGO26gfNRP4ysBlrhpbRjQxWsiO414,812
+setuptools/monkey.py,sha256=0e3HdVKXHL415O7np-AUqhEFXPPuDdJKbI47chQ_DE4,5217
+setuptools/msvc.py,sha256=3LLt938e6OR7wWPzIvCQu7LCWZSIKqoKV6w3r8jV3kY,50561
+setuptools/namespaces.py,sha256=PMqGVPXPYQgjUTvEg9bGccRAkIODrQ6NmsDg_fwErwI,3093
+setuptools/package_index.py,sha256=egCu3CzZDtEwZL0psMfCkNJfkDryq1FgRkhFmr9rUPc,40103
+setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245
+setuptools/sandbox.py,sha256=mR83i-mu-ZUU_7TaMgYCeRSyzkqv8loJ_GR9xhS2DDw,14348
+setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218
+setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138
+setuptools/unicode_utils.py,sha256=aOOFo4JGwAsiBttGYDsqFS7YqWQeZ2j6DWiCuctR_00,941
+setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144
+setuptools/wheel.py,sha256=0P8tSk105uF_Ub-30N2HU2X2v7MKDSdjpeQlRRW3SkI,8288
+setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/REQUESTED b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9466bf6320157d79e69d6940e2b09fb08f64da51
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt
@@ -0,0 +1,56 @@
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+test = setuptools.command.test:test
+upload_docs = setuptools.command.upload_docs:upload_docs
+
+[distutils.setup_keywords]
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+test_loader = setuptools.dist:check_importable
+test_runner = setuptools.dist:check_importable
+test_suite = setuptools.dist:check_test_suite
+tests_require = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:invalid_unless_false
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+depends.txt = setuptools.command.egg_info:warn_depends_obsolete
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.finalize_distribution_options]
+keywords = setuptools.dist:Distribution._finalize_setup_keywords
+parent_finalize = setuptools.dist:_Distribution.finalize_options
+
diff --git a/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..370203244ceae5a5b18aaa53f830adeb2b6bb795
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt
@@ -0,0 +1,4 @@
+_distutils_hack
+debian
+pkg_resources
+setuptools
diff --git a/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/INSTALLER b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/METADATA b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..72504946308118d4d441c177c31443a27891865c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: triton
+Version: 2.2.0
+Summary: A language and compiler for custom Deep Learning operations
+Home-page: https://github.com/openai/triton/
+Author: Philippe Tillet
+Author-email: phil@openai.com
+Keywords: Compiler,Deep Learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Dist: filelock
+Provides-Extra: build
+Requires-Dist: cmake >=3.20 ; extra == 'build'
+Requires-Dist: lit ; extra == 'build'
+Provides-Extra: tests
+Requires-Dist: autopep8 ; extra == 'tests'
+Requires-Dist: flake8 ; extra == 'tests'
+Requires-Dist: isort ; extra == 'tests'
+Requires-Dist: numpy ; extra == 'tests'
+Requires-Dist: pytest ; extra == 'tests'
+Requires-Dist: scipy >=1.7.1 ; extra == 'tests'
+Requires-Dist: torch ; extra == 'tests'
+Provides-Extra: tutorials
+Requires-Dist: matplotlib ; extra == 'tutorials'
+Requires-Dist: pandas ; extra == 'tutorials'
+Requires-Dist: tabulate ; extra == 'tutorials'
+Requires-Dist: torch ; extra == 'tutorials'
+
diff --git a/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/RECORD b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..1e2c723d4f7abe5e3d9aefac01712c45a504aee8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/RECORD
@@ -0,0 +1,95 @@
+triton-2.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+triton-2.2.0.dist-info/METADATA,sha256=IDmQW_4IUQtySps9Fv-NCHiJkUNY_aI_r2acxR76pN0,1356
+triton-2.2.0.dist-info/RECORD,,
+triton-2.2.0.dist-info/WHEEL,sha256=1FEjxEYgybphwh9S0FO9IcZ0B-NIeM2ko8OzhFZeOeQ,152
+triton-2.2.0.dist-info/top_level.txt,sha256=bJirsiO60v0aa1MImn9dJxjN8oNC_kUKe0X3pr7KHPQ,190
+triton/_C/libtriton.so,sha256=oRbyj9CwHK3r4YJXEipDL59rzbz1UxBytLGzJGQgxZA,355816224
+triton/__init__.py,sha256=WCf3ik411OotHIyMAwii6bbZpSvfh2gy_IOlT5BO5V8,1230
+triton/__pycache__/__init__.cpython-310.pyc,,
+triton/__pycache__/testing.cpython-310.pyc,,
+triton/common/__init__.py,sha256=oif2rQjHnXXQpRzxm5vylRNQL2fQNhHvPo0XaO0z208,116
+triton/common/__pycache__/__init__.cpython-310.pyc,,
+triton/common/__pycache__/backend.cpython-310.pyc,,
+triton/common/__pycache__/build.cpython-310.pyc,,
+triton/common/backend.py,sha256=NodHQbmLuMWD-Vbdhwihi6ehHGGLkqn2rEdQWeWKGpQ,5414
+triton/common/build.py,sha256=TAn38HAtBrT4ykPUN87980jUbNH-Ajgho8oFrpXt29Y,4675
+triton/compiler/__init__.py,sha256=hmfZoMEEgYAaU9MbrrRbXo7fOUps4gVvtZn4rxANp1I,341
+triton/compiler/__pycache__/__init__.cpython-310.pyc,,
+triton/compiler/__pycache__/code_generator.cpython-310.pyc,,
+triton/compiler/__pycache__/compiler.cpython-310.pyc,,
+triton/compiler/__pycache__/errors.cpython-310.pyc,,
+triton/compiler/__pycache__/make_launcher.cpython-310.pyc,,
+triton/compiler/__pycache__/target.cpython-310.pyc,,
+triton/compiler/__pycache__/utils.cpython-310.pyc,,
+triton/compiler/code_generator.py,sha256=4J4a76pgaKFcooVjvbtIuRKBsBovmJYB3cteRiI1R_E,54217
+triton/compiler/compiler.py,sha256=XHvD9EjR0n7diBucEzotfHbyj7Jj8u7xTSfAAbhZua8,29863
+triton/compiler/errors.py,sha256=PiquMxHuHayRvdH3hMXSQlOnDswK3TGNWENB29YX_FU,1666
+triton/compiler/make_launcher.py,sha256=zdNyb-4DzyRp7_wdwfaHojCOjCnSlO3rirLEmN050k4,10118
+triton/compiler/target.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+triton/compiler/utils.py,sha256=rUYhGSg97k4oZ7Q4MS7XuStbNl1-Eh5Tgf5KSHacRJE,12703
+triton/language/__init__.py,sha256=Mqgkv832S-wdYMhTLEFLQ5-5Ms037E0yUji9KMEmD1c,3241
+triton/language/__pycache__/__init__.cpython-310.pyc,,
+triton/language/__pycache__/core.cpython-310.pyc,,
+triton/language/__pycache__/math.cpython-310.pyc,,
+triton/language/__pycache__/random.cpython-310.pyc,,
+triton/language/__pycache__/semantic.cpython-310.pyc,,
+triton/language/__pycache__/standard.cpython-310.pyc,,
+triton/language/core.py,sha256=6m6DAfmHVRcvg9UeEes8PPaU5W-Y-qQ5yDdo0lIeoLE,61916
+triton/language/extra/__init__.py,sha256=zyhlj6Mo9_KD7E5szGxI18Ko3b31E00-s1ybOM5KzkQ,39
+triton/language/extra/__pycache__/__init__.cpython-310.pyc,,
+triton/language/extra/__pycache__/cuda.cpython-310.pyc,,
+triton/language/extra/cuda.py,sha256=kfFlr8Wldjr09BoSGAxcNxcrAbUtIWCFfwW8jCGTwcs,559
+triton/language/math.py,sha256=oyoAeuEyjhQ9_2QLtEapITAWyZDBUqtIsFQUac3gLcY,62306
+triton/language/random.py,sha256=28hJYR1YNE8Hz8c8yCmKHb7YRRuhkZW5qJyD9yoJyws,6567
+triton/language/semantic.py,sha256=PZVm4617KYl7D4KKFI3pQt3RDjJpQPvYK-VWIcaYecA,68708
+triton/language/standard.py,sha256=HhLVz4G5XUnZPz4VoKzEH9b35w6a-SdjCF_jPV83pHM,10986
+triton/ops/__init__.py,sha256=NL5fhIJywJWOsSHMDA6JvjGhS7xA4-Z6CYIAQRy1FRU,313
+triton/ops/__pycache__/__init__.cpython-310.pyc,,
+triton/ops/__pycache__/cross_entropy.cpython-310.pyc,,
+triton/ops/__pycache__/flash_attention.cpython-310.pyc,,
+triton/ops/__pycache__/matmul.cpython-310.pyc,,
+triton/ops/__pycache__/matmul_perf_model.cpython-310.pyc,,
+triton/ops/blocksparse/__init__.py,sha256=6YEVQNzipgQCpoO_7B8H7ckaSW2Idt1244s7IyLWAwc,100
+triton/ops/blocksparse/__pycache__/__init__.cpython-310.pyc,,
+triton/ops/blocksparse/__pycache__/matmul.cpython-310.pyc,,
+triton/ops/blocksparse/__pycache__/softmax.cpython-310.pyc,,
+triton/ops/blocksparse/matmul.py,sha256=S29Wv0X47AUoCMfSw7A7-Lt6lUyGPzy63Q8pcD41O1w,15920
+triton/ops/blocksparse/softmax.py,sha256=2jfmu1Bn9XsM4PyBsSRaSi3-XK0bJABxwQ-XsTwo7fg,8243
+triton/ops/cross_entropy.py,sha256=Jr-iQ6oZQir8gh4WRmlPoh_CY4fM8x9c9dDsuavyFyQ,3451
+triton/ops/flash_attention.py,sha256=cwzNu7vlVnfS1J4dXiIrmDHLtfQBa-PIpsnn-OKmiH4,17945
+triton/ops/matmul.py,sha256=c37U9tqBnaUdt1OyFdGZwZpG3KR6IXNz0cveJIx-Uuc,8902
+triton/ops/matmul_perf_model.py,sha256=WoDhHVD0VwAISJ8ZqMJqmK0dpEMk4Xrxur4vwIJ9VWw,6543
+triton/runtime/__init__.py,sha256=Y4QxhfVrpiu8PtJFWR5DU1yj6RSxzwUbrpyCi2hCZNc,440
+triton/runtime/__pycache__/__init__.cpython-310.pyc,,
+triton/runtime/__pycache__/autotuner.cpython-310.pyc,,
+triton/runtime/__pycache__/cache.cpython-310.pyc,,
+triton/runtime/__pycache__/driver.cpython-310.pyc,,
+triton/runtime/__pycache__/errors.cpython-310.pyc,,
+triton/runtime/__pycache__/interpreter.cpython-310.pyc,,
+triton/runtime/__pycache__/jit.cpython-310.pyc,,
+triton/runtime/autotuner.py,sha256=sWzpi-a0qPaspGaCyg3yVAsiy8QzEJ9bFtgJ3uM8zkg,13612
+triton/runtime/backends/cuda.c,sha256=XHNGYvyjAs-JLB_SeN0RDJYgFC2_wtmoR-BNIm-V3eA,22730
+triton/runtime/backends/hip.c,sha256=j3sgv1Qm9vZu-KVpXL4fH01eLtWSMQB7bHUay7jCmAE,4121
+triton/runtime/cache.py,sha256=2XGluIQc9VpBDcfyh4H3XoB_ldKOLE87ZHFRbx10azY,5178
+triton/runtime/driver.py,sha256=A3Hl8W7EIH9q2l1oijNHAfrC1xQCLR4D5311oIfFhfU,5631
+triton/runtime/errors.py,sha256=hjdNdL7xZRFyt7TUMbTBwEWe5yitLkMkWlPW-HCVLlI,543
+triton/runtime/interpreter.py,sha256=bg8De0KpKwoe3DKiLbZ9Fq4jBkf1mnRoKBSPb5MXs5s,21582
+triton/runtime/jit.py,sha256=E0iysZOyWbXUFcCj60y6C7mRkA3F0sz4C6FruFtmnvA,25699
+triton/testing.py,sha256=t6fAnZlikUJUMOrQCT0nn2PCPGHuDyhjON_VYUpQ48g,18258
+triton/third_party/cuda/bin/cuobjdump,sha256=M3aisp1Sv424T0BOqhn3rI92P0K-KEcBnB3qD1KQh64,540144
+triton/third_party/cuda/bin/nvdisasm,sha256=vxrhwuck1PI4_RQ2lidzhaIKqxLqPBB_1bh0nP2VSEs,50678760
+triton/third_party/cuda/bin/ptxas,sha256=641SCj3yUiIP_edDSDLDK6c7LHkSMF8IOrHVLxa7lwQ,29429360
+triton/third_party/cuda/include/cuda.h,sha256=J1YRiXq0IN6GG3ZLLk1T7oQsQtY2wFgES9LCG3Tte0g,932912
+triton/third_party/cuda/lib/libdevice.10.bc,sha256=XC-uN8huaMOjhgWpX1EtfRLV89uYYxC-R_VzBKpype4,473728
+triton/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+triton/tools/__pycache__/__init__.cpython-310.pyc,,
+triton/tools/__pycache__/build_extern.cpython-310.pyc,,
+triton/tools/__pycache__/compile.cpython-310.pyc,,
+triton/tools/__pycache__/disasm.cpython-310.pyc,,
+triton/tools/__pycache__/link.cpython-310.pyc,,
+triton/tools/build_extern.py,sha256=9v5Lz1vwksR0xqY9_oT5KltuKVXls-O7GqebAfZauoc,14363
+triton/tools/compile.c,sha256=rjuAQ8b-2DTtbj29SgK1NxJI5BSU2P9ccp9wa5p8Iyc,2090
+triton/tools/compile.h,sha256=n9QKIFZTL4RSsiXtAxBP9XGSnxjyaevQQ9bBpwDsvAg,332
+triton/tools/compile.py,sha256=U76d265bqoZUC6GWFuX-wnRA3oba_dNRDUfMXQh_EQg,6495
+triton/tools/disasm.py,sha256=U58GRL7v14hu4-B_kWkciHaY9jVIkTKg7DtioH4LTHo,5080
+triton/tools/link.py,sha256=EODqTTEk8N4kjJkU099X--0dGwviFjJNiLcBxUUukhw,11824
diff --git a/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/WHEEL b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1d812513305907d2ee59b95d161fdb54d1ab559c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-manylinux_2_17_x86_64
+Tag: cp310-cp310-manylinux2014_x86_64
+
diff --git a/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/top_level.txt b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..14f25ad9b60025addcb1083d0c33710bf883a244
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/triton-2.2.0.dist-info/top_level.txt
@@ -0,0 +1,12 @@
+triton
+triton/_C
+triton/common
+triton/compiler
+triton/language
+triton/language/extra
+triton/ops
+triton/ops/blocksparse
+triton/runtime
+triton/runtime/backends
+triton/third_party
+triton/tools