diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c1bfeb184ed162f2edfd20c751b50f385ab56ef
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd81697f6715541c3aa1f6bab827a244533b1847
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12d57b304c5c0158fac703f0646a9274470419c6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/builder.bak.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5354ab2d9c47d71c84aa5a378f23ea4198a7a5bd
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac9372afa2f71df1dbe34d62675f72d81172b7e4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a46335691ed8e78eb371b3ce4903688d2b38cbbe
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2deb27ffc33492ee35c6ff64f42b15b90b57bcf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8652cca2852c50ce475596e3ed7afb025f54fb16
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eacd46632b16a4fd1218c73ce1fc70e2834de50c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/iterable_dataset.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe5ae0793d08e70bef6084639b6fe5dd905dbdda
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/metric.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..345b7ec6353e51eb8e4826a7e2284bfeca35579b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..703c997211c4f2be39c0d59c51999d5f38992ed7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/arrow_dataset.py b/llmeval-env/lib/python3.10/site-packages/datasets/arrow_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..686f2e37121f487e2ebb91477f47eff73d5a3b99
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/arrow_dataset.py
@@ -0,0 +1,6495 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Simple Dataset wrapping an Arrow Table."""
+
+import contextlib
+import copy
+import fnmatch
+import itertools
+import json
+import math
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+import warnings
+import weakref
+from collections import Counter
+from collections.abc import Mapping
+from copy import deepcopy
+from functools import partial, wraps
+from io import BytesIO
+from math import ceil, floor
+from pathlib import Path
+from random import sample
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ overload,
+)
+from typing import Sequence as Sequence_
+
+import fsspec
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+from fsspec.core import url_to_fs
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+)
+from huggingface_hub.hf_api import RepoFile
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter, OptimizedTypedSequence
+from .data_files import sanitize_patterns
+from .download.streaming_download_manager import xgetsize
+from .features import Audio, ClassLabel, Features, Image, Sequence, Value
+from .features.features import (
+ FeatureType,
+ _align_features,
+ _check_if_features_can_be_aligned,
+ generate_from_arrow_type,
+ pandas_types_mapper,
+ require_decoding,
+)
+from .filesystems import is_remote_filesystem
+from .fingerprint import (
+ fingerprint_transform,
+ format_kwargs_for_fingerprint,
+ format_transform_for_fingerprint,
+ generate_fingerprint,
+ generate_random_fingerprint,
+ get_temporary_cache_files_directory,
+ is_caching_enabled,
+ maybe_register_dataset_for_temp_dir_deletion,
+ update_fingerprint,
+ validate_fingerprint,
+)
+from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table
+from .formatting.formatting import LazyDict, _is_range_contiguous
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .search import IndexableMixin
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import (
+ InMemoryTable,
+ MemoryMappedTable,
+ Table,
+ _memory_mapped_record_batch_reader_from_file,
+ cast_array_to_feature,
+ concat_tables,
+ embed_table_storage,
+ list_table_cache_files,
+ table_cast,
+ table_iter,
+ table_visitor,
+)
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import estimate_dataset_size
+from .utils.info_utils import is_small_dataset
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import (
+ Literal,
+ asdict,
+ convert_file_size_to_int,
+ glob_pattern_to_regex,
+ iflatmap_unordered,
+ string_to_dict,
+ unique_values,
+)
+from .utils.stratify import stratified_shuffle_split_generate_indices
+from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf
+from .utils.typing import ListLike, PathLike
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import polars as pl
+ import pyspark
+ import sqlalchemy
+
+ from .dataset_dict import DatasetDict
+ from .iterable_dataset import IterableDataset
+
+logger = logging.get_logger(__name__)
+
+PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = (
+ "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet"
+)
+
+
+class DatasetInfoMixin:
+ """This base class exposes some attributes of DatasetInfo
+ at the base level of the Dataset for easy access.
+ """
+
+ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
+ self._info = info
+ self._split = split
+
+ @property
+ def info(self):
+ """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
+ return self._info
+
+ @property
+ def split(self):
+ """[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
+ return self._split
+
+ @property
+ def builder_name(self) -> str:
+ return self._info.builder_name
+
+ @property
+ def citation(self) -> str:
+ return self._info.citation
+
+ @property
+ def config_name(self) -> str:
+ return self._info.config_name
+
+ @property
+ def dataset_size(self) -> Optional[int]:
+ return self._info.dataset_size
+
+ @property
+ def description(self) -> str:
+ return self._info.description
+
+ @property
+ def download_checksums(self) -> Optional[dict]:
+ return self._info.download_checksums
+
+ @property
+ def download_size(self) -> Optional[int]:
+ return self._info.download_size
+
+ @property
+ def features(self) -> Optional[Features]:
+ return self._info.features.copy() if self._info.features is not None else None
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._info.homepage
+
+ @property
+ def license(self) -> Optional[str]:
+ return self._info.license
+
+ @property
+ def size_in_bytes(self) -> Optional[int]:
+ return self._info.size_in_bytes
+
+ @property
+ def supervised_keys(self):
+ return self._info.supervised_keys
+
+ @property
+ def task_templates(self):
+ return self._info.task_templates
+
+ @property
+ def version(self):
+ return self._info.version
+
+
+class TensorflowDatasetMixin:
+ _TF_DATASET_REFS = set()
+
+ @staticmethod
+ def _get_output_signature(
+ dataset: "Dataset",
+ collate_fn: Callable,
+ collate_fn_args: dict,
+ cols_to_retain: Optional[List[str]] = None,
+ batch_size: Optional[int] = None,
+ num_test_batches: int = 20,
+ ):
+ """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
+ after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
+ the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
+ it out just by inspecting the dataset.
+
+ Args:
+ dataset (`Dataset`): Dataset to load samples from.
+ collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ cols_to_retain (`List[str]`, *optional*): Columns to keep in the test samples used for shape
+ inference; label columns are always retained. `None` keeps all columns.
+ batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
+ Can be None, which indicates that batch sizes can be variable.
+ num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
+
+ Returns:
+ `dict`: Dict mapping column names to `tf.TensorSpec` objects
+ `dict`: Dict mapping column names to `np.dtype` objects
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if len(dataset) == 0:
+ raise ValueError("Unable to get the output signature because the dataset is empty.")
+ if batch_size is not None:
+ batch_size = min(len(dataset), batch_size)
+ test_batch_size = 1
+
+ if cols_to_retain is not None:
+ cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
+
+ test_batches = []
+ for _ in range(num_test_batches):
+ indices = sample(range(len(dataset)), test_batch_size)
+ test_batch = dataset[indices]
+ if cols_to_retain is not None:
+ test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
+ test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
+ test_batch = collate_fn(test_batch, **collate_fn_args)
+ test_batches.append(test_batch)
+
+ tf_columns_to_signatures = {}
+ np_columns_to_dtypes = {}
+ for column in test_batches[0].keys():
+ raw_arrays = [batch[column] for batch in test_batches]
+ # In case the collate_fn returns something strange
+ np_arrays = []
+ for array in raw_arrays:
+ if isinstance(array, np.ndarray):
+ np_arrays.append(array)
+ elif isinstance(array, tf.Tensor):
+ np_arrays.append(array.numpy())
+ else:
+ np_arrays.append(np.array(array))
+
+ if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
+ tf_dtype = tf.int64
+ np_dtype = np.int64
+ elif np.issubdtype(np_arrays[0].dtype, np.number):
+ tf_dtype = tf.float32
+ np_dtype = np.float32
+ elif np_arrays[0].dtype.kind == "U": # Unicode strings
+ np_dtype = np.unicode_
+ tf_dtype = tf.string
+ else:
+ raise RuntimeError(
+ f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
+ "Nested types and image/audio types are not supported yet."
+ )
+ shapes = [array.shape for array in np_arrays]
+ static_shape = []
+ for dim in range(len(shapes[0])):
+ sizes = {shape[dim] for shape in shapes}
+ if dim == 0:
+ static_shape.append(batch_size)
+ continue
+ if len(sizes) == 1: # This dimension looks constant
+ static_shape.append(sizes.pop())
+ else: # Use None for variable dimensions
+ static_shape.append(None)
+ tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
+ np_columns_to_dtypes[column] = np_dtype
+
+ return tf_columns_to_signatures, np_columns_to_dtypes
+
+ def to_tf_dataset(
+ self,
+ batch_size: Optional[int] = None,
+ columns: Optional[Union[str, List[str]]] = None,
+ shuffle: bool = False,
+ collate_fn: Optional[Callable] = None,
+ drop_remainder: bool = False,
+ collate_fn_args: Optional[Dict[str, Any]] = None,
+ label_cols: Optional[Union[str, List[str]]] = None,
+ prefetch: bool = True,
+ num_workers: int = 0,
+ num_test_batches: int = 20,
+ ):
+ """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
+ the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
+ `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
+ `tf.Tensor` is yielded instead.
+
+ Args:
+ batch_size (`int`, *optional*):
+ Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
+ batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ columns (`List[str]` or `str`, *optional*):
+ Dataset column(s) to load in the `tf.data.Dataset`.
+ Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
+ shuffle(`bool`, defaults to `False`):
+ Shuffle the dataset order when loading. Recommended `True` for training, `False` for
+ validation/evaluation.
+ drop_remainder(`bool`, defaults to `False`):
+ Drop the last incomplete batch when loading. Ensures
+ that all batches yielded by the dataset will have the same length on the batch dimension.
+ collate_fn(`Callable`, *optional*):
+ A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`, *optional*):
+ An optional `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ label_cols (`List[str]` or `str`, defaults to `None`):
+ Dataset column(s) to load as labels.
+ Note that many models compute loss internally rather than letting Keras do it, in which case
+ passing the labels here is optional, as long as they're in the input `columns`.
+ prefetch (`bool`, defaults to `True`):
+ Whether to run the dataloader in a separate thread and maintain
+ a small buffer of batches for training. Improves performance by allowing data to be loaded in the
+ background while the model is training.
+ num_workers (`int`, defaults to `0`):
+ Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8.
+ num_test_batches (`int`, defaults to `20`):
+ Number of batches to use to infer the output signature of the dataset.
+ The higher this number, the more accurate the signature will be, but the longer it will take to
+ create the dataset.
+
+ Returns:
+ `tf.data.Dataset`
+
+ Example:
+
+ ```py
+ >>> ds_train = ds["train"].to_tf_dataset(
+ ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ ... shuffle=True,
+ ... batch_size=16,
+ ... collate_fn=data_collator,
+ ... )
+ ```
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if (isinstance(columns, list) and len(columns) == 1) or (
+ isinstance(label_cols, list) and len(label_cols) == 1
+ ):
+ warnings.warn(
+ "The output of `to_tf_dataset` will change when passing a single element list for `labels` or "
+ "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
+ "single string.\n"
+ "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
+ "New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
+ FutureWarning,
+ )
+
+ if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
+ logger.warning(
+ "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
+ "pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
+ "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
+ "Tensors instead of streaming with to_tf_dataset()."
+ )
+
+ if collate_fn is None:
+ # Set a very simple default collator that just stacks things together
+ collate_fn = minimal_tf_collate_fn
+ if collate_fn_args is None:
+ collate_fn_args = {}
+ if label_cols and not columns:
+ raise ValueError("Cannot specify label_cols without specifying columns!")
+ if label_cols is None:
+ label_cols = []
+ elif isinstance(label_cols, str):
+ label_cols = [label_cols]
+ if len(set(label_cols)) < len(label_cols):
+ raise ValueError("List of label_cols contains duplicates.")
+ if columns:
+ if isinstance(columns, str):
+ columns = [columns]
+ if len(set(columns)) < len(columns):
+ raise ValueError("List of columns contains duplicates.")
+ cols_to_retain = list(set(columns + label_cols))
+ else:
+ cols_to_retain = None # Indicates keeping all valid columns
+ columns = []
+
+ if self.format["type"] not in ["custom", "numpy"]:
+ dataset = self.with_format("numpy")
+ else:
+ dataset = self
+
+ # TODO(Matt, QL): deprecate the retention of label_ids and label
+
+ output_signature, columns_to_np_types = dataset._get_output_signature(
+ dataset,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ cols_to_retain=cols_to_retain,
+ batch_size=batch_size if drop_remainder else None,
+ num_test_batches=num_test_batches,
+ )
+
+ if "labels" in output_signature:
+ if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
+ columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
+ if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
+ label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
+
+ for col in columns:
+ if col not in output_signature:
+ raise ValueError(f"Column {col} not found in dataset!")
+
+ for col in label_cols:
+ if col not in output_signature:
+ raise ValueError(f"Label column {col} not found in dataset!")
+
+ if num_workers == 0:
+ tf_dataset = dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ )
+ elif num_workers > 0:
+ if batch_size is None:
+ raise NotImplementedError(
+ "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
+ "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
+ )
+ tf_dataset = multiprocess_dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+ else:
+ raise ValueError("num_workers must be >= 0")
+
+ def split_features_and_labels(input_batch):
+ # TODO(Matt, QL): deprecate returning the dict content when there's only one key
+ features = {key: tensor for key, tensor in input_batch.items() if key in columns}
+ labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
+ if len(features) == 1:
+ features = list(features.values())[0]
+ if len(labels) == 1:
+ labels = list(labels.values())[0]
+ if isinstance(labels, dict) and len(labels) == 0:
+ return features
+ else:
+ return features, labels
+
+ if cols_to_retain is not None:
+ tf_dataset = tf_dataset.map(split_features_and_labels)
+
+ if prefetch:
+ tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
+
+ # Remove a reference to the open Arrow file on delete
+ def cleanup_callback(ref):
+ dataset.__del__()
+ self._TF_DATASET_REFS.remove(ref)
+
+ self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
+
+ return tf_dataset
+
+
+class DatasetTransformationNotAllowedError(Exception):
+ pass
+
+
+def transmit_format(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None
+ unformatted_columns = set(self.column_names) - set(self._format_columns or [])
+ self_format = {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ # re-apply format to the output
+ for dataset in datasets:
+ new_format = self_format.copy()
+ if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
+ # sort the columns to have a deterministic list of columns that we can compare with `out_format`
+ new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
+ out_format = {
+ "type": dataset._format_type,
+ "format_kwargs": dataset._format_kwargs,
+ "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
+ "output_all_columns": dataset._output_all_columns,
+ }
+ if out_format != new_format:
+ fingerprint = dataset._fingerprint
+ dataset.set_format(**new_format)
+ dataset._fingerprint = fingerprint
+ return out
+
+ wrapper._decorator_name_ = "transmit_format"
+ return wrapper
+
+
+def transmit_tasks(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ for dataset in datasets:
+ # Remove task templates if a column mapping of the template is no longer valid
+ if self.info.task_templates is not None:
+ dataset.info.task_templates = [
+ template
+ for template in self.info.task_templates
+ if all(
+ dataset._info.features.get(k) == self._info.features.get(k)
+ for k in template.column_mapping.keys()
+ )
+ ]
+ return out
+
+ wrapper._decorator_name_ = "transmit_tasks"
+ return wrapper
+
+
+def update_metadata_with_features(table: Table, features: Features):
+ """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema."""
+ features = Features({col_name: features[col_name] for col_name in table.column_names})
+ if table.schema.metadata is None or b"huggingface" not in table.schema.metadata:
+ pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))
+ else:
+ metadata = json.loads(table.schema.metadata[b"huggingface"].decode())
+ if "info" not in metadata:
+ metadata["info"] = asdict(DatasetInfo(features=features))
+ else:
+ metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"]
+ pa_metadata = {"huggingface": json.dumps(metadata)}
+ table = table.replace_schema_metadata(pa_metadata)
+ return table
+
+
+def _check_table(table) -> Table:
+ """We check the table type to make sure it's an instance of :class:`datasets.table.Table`"""
+ if isinstance(table, pa.Table):
+ # for a pyarrow table, we can just consider it as an in-memory table
+ # this is here for backward compatibility
+ return InMemoryTable(table)
+ elif isinstance(table, Table):
+ return table
+ else:
+ raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.")
+
+
+def _check_valid_indices_value(index, size):
+ if (index < 0 and index + size < 0) or (index >= size):
+ raise IndexError(f"Index {index} out of range for dataset of size {size}.")
+
+
+class NonExistentDatasetError(Exception):
+ """Used when we expect the existence of a dataset"""
+
+ pass
+
+
+class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
+ """A Dataset backed by an Arrow table."""
+
+ def __init__(
+ self,
+ arrow_table: Table,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_table: Optional[Table] = None,
+ fingerprint: Optional[str] = None,
+ ):
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+ IndexableMixin.__init__(self)
+
+ self._data: Table = _check_table(arrow_table)
+ self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
+ maybe_register_dataset_for_temp_dir_deletion(self)
+
+ self._format_type: Optional[str] = None
+ self._format_kwargs: dict = {}
+ self._format_columns: Optional[list] = None
+ self._output_all_columns: bool = False
+ self._fingerprint: str = fingerprint
+
+ # Read metadata
+
+ if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
+ metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
+ if (
+ "fingerprint" in metadata and self._fingerprint is None
+ ): # try to load fingerprint from the arrow file metadata
+ self._fingerprint = metadata["fingerprint"]
+
+ # Infer features if None
+ inferred_features = Features.from_arrow_schema(arrow_table.schema)
+ if self.info.features is None:
+ self.info.features = inferred_features
+ else: # make sure the nested columns are in the right order
+ try:
+ self.info.features = self.info.features.reorder_fields_as(inferred_features)
+ except ValueError as e:
+ raise ValueError(
+ f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
+ )
+
+ # Infer fingerprint if None
+
+ if self._fingerprint is None:
+ self._fingerprint = generate_fingerprint(self)
+
+ # Sanity checks
+
+ if self._info.features is None:
+ raise ValueError("Features can't be None in a Dataset object")
+ if self._fingerprint is None:
+ raise ValueError("Fingerprint can't be None in a Dataset object")
+ if self.info.features.type != inferred_features.type:
+ raise ValueError(
+ f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
+ )
+
+ if self._indices is not None:
+ if not pa.types.is_unsigned_integer(self._indices.column(0).type):
+ raise ValueError(
+ f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
+ )
+ _check_column_names(self._data.column_names)
+
+ self._data = update_metadata_with_features(self._data, self._info.features)
+
+ @property
+ def features(self) -> Features:
+ features = super().features
+ if features is None: # this is already checked in __init__
+ raise ValueError("Features can't be None in a Dataset object")
+ return features
+
+ @classmethod
+ def from_file(
+ cls,
+ filename: str,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_filename: Optional[str] = None,
+ in_memory: bool = False,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_filename (`str`, *optional*):
+ File name of the indices.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ [`Dataset`]
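+
+ Example:
+
+ A minimal usage sketch; the Arrow file path below is a hypothetical placeholder:
+
+ ```py
+ >>> ds = Dataset.from_file('path/to/dataset.arrow')
+ ```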
+ """
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+
+ if indices_filename is not None:
+ indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
+ else:
+ indices_pa_table = None
+
+ return cls(
+ arrow_table=table,
+ info=info,
+ split=split,
+ indices_table=indices_pa_table,
+ )
+
+ @classmethod
+ def from_buffer(
+ cls,
+ buffer: pa.Buffer,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow buffer.
+
+ Args:
+ buffer (`pyarrow.Buffer`):
+ Arrow buffer.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_buffer (`pyarrow.Buffer`, *optional*):
+ Indices Arrow buffer.
+
+ Returns:
+ [`Dataset`]
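+
+ Example:
+
+ A minimal sketch that first builds an Arrow IPC stream buffer in memory; the column names and values are illustrative:
+
+ ```py
+ >>> import pyarrow as pa
+ >>> table = pa.table({"text": ["Good", "Bad"], "label": [0, 1]})
+ >>> sink = pa.BufferOutputStream()
+ >>> with pa.ipc.new_stream(sink, table.schema) as writer:
+ ...     writer.write_table(table)
+ >>> ds = Dataset.from_buffer(sink.getvalue())
+ ```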
+ """
+ table = InMemoryTable.from_buffer(buffer)
+
+ if indices_buffer is not None:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+ else:
+ indices_table = None
+
+ return cls(table, info=info, split=split, indices_table=indices_table)
+
+ @classmethod
+ def from_pandas(
+ cls,
+ df: pd.DataFrame,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ preserve_index: Optional[bool] = None,
+ ) -> "Dataset":
+ """
+ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
+
+ The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
+ DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
+ type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
+ contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
+ features and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ Dataframe that contains the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting Dataset.
+ The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
+ Use `preserve_index=True` to force it to be stored as a column.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_pandas(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable.from_pandas(
+ df=df,
+ preserve_index=preserve_index,
+ )
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_polars(
+ cls,
+ df: "pl.DataFrame",
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Collect the underlying arrow arrays in an Arrow Table.
+
+ This operation is mostly zero copy.
+
+ Data types that do copy:
+ * CategoricalType
+
+ Args:
+ df (`polars.DataFrame`): DataFrame to convert to Arrow Table
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Examples:
+ ```py
+ >>> ds = Dataset.from_polars(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable(df.to_arrow())
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_polars(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_dict(
+ cls,
+ mapping: dict,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
+
+ Args:
+ mapping (`Mapping`):
+ Mapping of strings to Arrays or Python lists.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
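+
+ Example:
+
+ A minimal sketch; the column names and values are illustrative:
+
+ ```py
+ >>> ds = Dataset.from_dict({"text": ["Good", "Bad"], "label": [0, 1]})
+ ```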
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ arrow_typed_mapping = {}
+ for col, data in mapping.items():
+ if isinstance(data, (pa.Array, pa.ChunkedArray)):
+ data = cast_array_to_feature(data, features[col]) if features is not None else data
+ else:
+ data = OptimizedTypedSequence(
+ features.encode_column(data, col) if features is not None else data,
+ type=features[col] if features is not None else None,
+ col=col,
+ )
+ arrow_typed_mapping[col] = data
+ mapping = arrow_typed_mapping
+ pa_table = InMemoryTable.from_pydict(mapping=mapping)
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ if info.features is None:
+ info.features = Features(
+ {
+ col: generate_from_arrow_type(data.type)
+ if isinstance(data, (pa.Array, pa.ChunkedArray))
+ else data.get_inferred_type()
+ for col, data in mapping.items()
+ }
+ )
+ return cls(pa_table, info=info, split=split)
+
+ @classmethod
+ def from_list(
+ cls,
+ mapping: List[dict],
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
+
+ Note that the keys of the first entry will be used to determine the dataset columns,
+ regardless of what is passed to features.
+
+ Args:
+ mapping (`List[dict]`): A list of mappings of strings to row values.
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
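+
+ Example:
+
+ A minimal sketch; the row contents are illustrative:
+
+ ```py
+ >>> ds = Dataset.from_list([{"text": "Good", "label": 0}, {"text": "Bad", "label": 1}])
+ ```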
+ """
+ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
+ mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
+ return cls.from_dict(mapping, features, info, split)
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from CSV file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the CSV file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_csv('path/to/dataset.csv')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create a Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`GeneratorConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = Dataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ gen_kwargs=gen_kwargs,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from JSON or JSON Lines file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the JSON or JSON Lines file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ field (`str`, *optional*):
+ Field name of the JSON file where the dataset is contained in.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_json('path/to/dataset.json')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ field=field,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from Parquet file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the Parquet file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_parquet('path/to/dataset.parquet')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from text file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the text file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_text('path/to/dataset.txt')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ keep_in_memory: bool = False,
+ cache_dir: str = None,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ **kwargs,
+ ):
+ """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
+ workers and the driver.
+ keep_in_memory (`bool`):
+ Whether to copy the data in-memory.
+ working_dir (`str`, *optional*):
+ Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
+ a non-NFS intermediate directory may improve performance.
+ load_from_cache_file (`bool`):
+ Whether to load the dataset from the cache if possible.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ >>> columns=["id", "name"],
+ >>> )
+ >>> ds = Dataset.from_spark(df)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=False,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ working_dir=working_dir,
+ load_from_cache_file=load_from_cache_file,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_sql(
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ """Create Dataset from SQL query or database table.
+
+ Args:
+ sql (`str` or `sqlalchemy.sql.Selectable`):
+ SQL query to be executed or a table name.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`SqlConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> # Fetch a database table
+ >>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
+ >>> # Execute a SQL query on the table
+ >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
+ >>> # Use a Selectable object to specify the query
+ >>> from sqlalchemy import select, text
+ >>> stmt = select([text("sentence")]).select_from(text("test_data"))
+ >>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
+ ```
+
+ <Tip>
+
+ The returned dataset can only be cached if `con` is specified as URI string.
+
+ </Tip>
+ """
+ from .io.sql import SqlDatasetReader
+
+ return SqlDatasetReader(
+ sql,
+ con,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ **kwargs,
+ ).read()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ maybe_register_dataset_for_temp_dir_deletion(self)
+ return self
+
+ def __del__(self):
+ if hasattr(self, "_data"):
+ del self._data
+ if hasattr(self, "_indices"):
+ del self._indices
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ self.__del__()
+
+ def save_to_disk(
+ self,
+ dataset_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
+ of the dataset directory where the dataset will be saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+ <Deprecated version="2.8.0">
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+ </Deprecated>
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+
+ <Added version="2.8.0"/>
+ num_proc (`int`, *optional*):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+ <Added version="2.8.0"/>
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ <Added version="2.8.0"/>
+
+ Example:
+
+ ```py
+ >>> ds.save_to_disk("path/to/dataset/directory")
+ >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
+ ```
+ """
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to save_to_disk: please specify either max_shard_size or num_shards, but not both."
+ )
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ if self.list_indexes():
+ raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
+
+ if num_shards is None:
+ dataset_nbytes = self._estimate_nbytes()
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, num_proc or 1)
+
+ num_proc = num_proc if num_proc is not None else 1
+ num_shards = num_shards if num_shards is not None else num_proc
+
+ fs: fsspec.AbstractFileSystem
+ fs, _ = url_to_fs(dataset_path, **(storage_options or {}))
+
+ if not is_remote_filesystem(fs):
+ parent_cache_files_paths = {
+ Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
+ }
+ # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
+ if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
+ raise PermissionError(
+ f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
+ )
+
+ fs.makedirs(dataset_path, exist_ok=True)
+
+ # Get json serializable state
+ state = {
+ key: self.__dict__[key]
+ for key in [
+ "_fingerprint",
+ "_format_columns",
+ "_format_kwargs",
+ "_format_type",
+ "_output_all_columns",
+ ]
+ }
+ state["_split"] = str(self.split) if self.split is not None else self.split
+ state["_data_files"] = [
+ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
+ ]
+ for k in state["_format_kwargs"].keys():
+ try:
+ json.dumps(state["_format_kwargs"][k])
+ except TypeError as e:
+ raise TypeError(
+ str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
+ ) from None
+ # Get json serializable dataset info
+ dataset_info = asdict(self._info)
+
+ shards_done = 0
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=len(self),
+ desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
+ )
+ kwargs_per_job = (
+ {
+ "job_id": shard_idx,
+ "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
+ "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
+ "storage_options": storage_options,
+ }
+ for shard_idx in range(num_shards)
+ )
+ shard_lengths = [None] * num_shards
+ shard_sizes = [None] * num_shards
+ if num_proc > 1:
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ else:
+ with pbar:
+ for kwargs in kwargs_per_job:
+ for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8"
+ ) as state_file:
+ json.dump(state, state_file, indent=2, sort_keys=True)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
+ ) as dataset_info_file:
+ # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
+ sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
+ json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
+
+ @staticmethod
+ def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
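+ # Writes a single shard to `fpath` and reports progress as `(job_id, done, content)` tuples:
+ # while writing, `done` is False and `content` is the number of examples written since the last
+ # update; the final tuple has `done=True` and `content=(num_examples, num_bytes)` for the shard.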
+ batch_size = config.DEFAULT_MAX_BATCH_SIZE
+
+ num_examples_progress_update = 0
+ writer = ArrowWriter(
+ features=shard.features,
+ path=fpath,
+ storage_options=storage_options,
+ embed_local_files=True,
+ )
+ try:
+ _time = time.time()
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ writer.write_table(pa_table)
+ num_examples_progress_update += len(pa_table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+
+ yield job_id, True, (num_examples, num_bytes)
+
+ @staticmethod
+ def _build_local_temp_path(uri_or_path: str) -> Path:
+ """
+ Builds and returns a `Path` that concatenates a local temporary directory with the given directory path
+ (or with the absolute/relative path extracted from the given URI).
+
+ Args:
+ uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) to concatenate.
+
+ Returns:
+ :class:`Path`: the concatenated path (temp dir + path)
+ """
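+ # Illustrative mapping (a sketch, with a hypothetical temporary directory tmp_dir):
+ # "/data/train" becomes "tmp_dir/data/train", and a protocol-stripped remote path such as
+ # "my-bucket/dataset/train" becomes "tmp_dir/my-bucket/dataset/train".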
+ src_dataset_path = Path(uri_or_path)
+ tmp_dir = get_temporary_cache_files_directory()
+ return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
+
+ @staticmethod
+ def load_from_disk(
+ dataset_path: str,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "Dataset":
+ """
+ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
+ filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset directory where the dataset will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Returns:
+ [`Dataset`]: The dataset requested. If `dataset_path` points to a `DatasetDict` directory
+ (one sub-directory per split), use [`datasets.load_from_disk`] or `DatasetDict.load_from_disk` instead.
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk("path/to/dataset/directory")
+ ```
+ """
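+ # A hedged usage sketch (not part of the docstring): loading from a remote filesystem goes through
+ # fsspec, with credentials passed via `storage_options`; the bucket name and options below are
+ # made up for illustration:
+ #
+ #     ds = Dataset.load_from_disk("s3://my-bucket/dataset/train", storage_options={"anon": False})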
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, dataset_path = url_to_fs(dataset_path, **(storage_options or {}))
+
+ dest_dataset_path = dataset_path
+ dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
+ dataset_info_is_file = fs.isfile(dataset_info_path)
+ dataset_state_is_file = fs.isfile(dataset_state_json_path)
+ if not dataset_info_is_file and not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}' or '{dataset_state_json_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}' or '{dataset_state_json_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+ if not dataset_info_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+ if not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object, but the provided path is not a `Dataset`."
+ )
+
+ # If the filesystem is remote, download the files to the local filesystem and point dest_dataset_path at the temp directory containing the local copies
+ if is_remote_filesystem(fs):
+ src_dataset_path = dest_dataset_path
+ dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
+ fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ with open(dataset_state_json_path, encoding="utf-8") as state_file:
+ state = json.load(state_file)
+ with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
+ dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
+
+ dataset_size = estimate_dataset_size(
+ Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
+ )
+ keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
+ table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
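+ # The Arrow files are memory-mapped by default; they are fully loaded into RAM only when
+ # `keep_in_memory=True` or, if unspecified, when the estimated size falls below
+ # `datasets.config.IN_MEMORY_MAX_SIZE` (which disables in-memory copies when set to 0).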
+
+ arrow_table = concat_tables(
+ thread_map(
+ table_cls.from_file,
+ [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]],
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset from disk",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(state["_data_files"]) <= 16 or None,
+ )
+ )
+
+ split = state["_split"]
+ split = Split(split) if split is not None else split
+
+ dataset = Dataset(
+ arrow_table=arrow_table,
+ info=dataset_info,
+ split=split,
+ fingerprint=state["_fingerprint"],
+ )
+
+ format = {
+ "type": state["_format_type"],
+ "format_kwargs": state["_format_kwargs"],
+ "columns": state["_format_columns"],
+ "output_all_columns": state["_output_all_columns"],
+ }
+ dataset = dataset.with_format(**format)
+
+ return dataset
+
+ @property
+ def data(self) -> Table:
+ """The Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.data
+ MemoryMappedTable
+ text: string
+ label: int64
+ ----
+ text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
+ label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
+ ```
+ """
+ return self._data
+
+ @property
+ def cache_files(self) -> List[dict]:
+ """The cache files containing the Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cache_files
+ [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
+ ```
+ """
+ cache_files = list_table_cache_files(self._data)
+ if self._indices is not None:
+ cache_files += list_table_cache_files(self._indices)
+ return [{"filename": cache_filename} for cache_filename in cache_files]
+
+ @property
+ def num_columns(self) -> int:
+ """Number of columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_columns
+ 2
+ ```
+ """
+ return self._data.num_columns
+
+ @property
+ def num_rows(self) -> int:
+ """Number of rows in the dataset (same as [`Dataset.__len__`]).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_rows
+ 1066
+ ```
+ """
+ if self._indices is not None:
+ return self._indices.num_rows
+ return self._data.num_rows
+
+ @property
+ def column_names(self) -> List[str]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return self._data.column_names
+
+ @property
+ def shape(self) -> Tuple[int, int]:
+ """Shape of the dataset (number of rows, number of columns).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.shape
+ (1066, 2)
+ ```
+ """
+ if self._indices is not None:
+ return (self._indices.num_rows, self._data.num_columns)
+ return self._data.shape
+
+ def unique(self, column: str) -> List:
+ """Return a list of the unique elements in a column.
+
+ This is implemented in the low-level backend and as such is very fast.
+
+ Args:
+ column (`str`):
+ Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+
+ Returns:
+ `list`: List of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.unique('label')
+ [1, 0]
+ ```
+ """
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+
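+ # If an indices mapping is present and doesn't cover all rows (e.g. after `select` or `filter`),
+ # materialize it first so that the Arrow-level `unique` below only sees the selected rows.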
+ if self._indices is not None and self._indices.num_rows != self._data.num_rows:
+ dataset = self.flatten_indices()
+ else:
+ dataset = self
+
+ return dataset._data.column(column).unique().to_pylist()
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
+
+ Args:
+ column (`str`):
+ The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq", split="validation")
+ >>> ds.features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column('answer')
+ >>> ds.features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ # Sanity checks
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ src_feat = self._info.features[column]
+ if not isinstance(src_feat, Value):
+ raise ValueError(
+ f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
+ )
+
+ if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
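+ # Values are stringified first when the column isn't already string-typed (or when nulls must be
+ # encoded as the "None" label), since `ClassLabel` names are strings.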
+
+ def stringify_column(batch):
+ batch[column] = [
+ str(sample) if include_nulls or sample is not None else None for sample in batch[column]
+ ]
+ return batch
+
+ dset = self.map(
+ stringify_column,
+ batched=True,
+ desc="Stringifying the column",
+ )
+ else:
+ dset = self
+
+ # Create the new feature
+ class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
+ dst_feat = ClassLabel(names=class_names)
+
+ def cast_to_class_labels(batch):
+ batch[column] = [
+ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
+ for sample in batch[column]
+ ]
+ return batch
+
+ new_features = dset.features.copy()
+ new_features[column] = dst_feat
+
+ dset = dset.map(
+ cast_to_class_labels,
+ batched=True,
+ features=new_features,
+ desc="Casting to class labels",
+ )
+
+ return dset
+
+ @fingerprint_transform(inplace=False)
+ def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
+ """Flatten the table.
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Args:
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ max_depth (`int`, defaults to `16`):
+ Maximum number of levels of nested struct columns to flatten.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with flattened columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
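+ # Flatten one level of struct columns per iteration, up to `max_depth` levels, stopping early
+ # once no struct-typed column remains.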
+ for depth in range(1, max_depth):
+ if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
+ dataset._data = dataset._data.flatten()
+ else:
+ break
+ dataset.info.features = self._info.features.flatten(max_depth=max_depth)
+ dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.')
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def cast(
+ self,
+ features: Features,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ num_proc: Optional[int] = None,
+ ) -> "Dataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `str` <-> `ClassLabel` you should use [`~datasets.Dataset.map`] to update the Dataset.
+ batch_size (`int`, defaults to `1000`):
+ Number of examples per batch provided to cast.
+ If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running [`~datasets.Dataset.map`].
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, ClassLabel, Value
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ if sorted(features) != sorted(self._data.column_names):
+ raise ValueError(
+ f"The columns in features ({list(features)}) must be identical "
+ f"to the columns in the dataset: {self._data.column_names}"
+ )
+
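+ # The cast is delegated to `map` in "arrow" format: each batch is converted to the target schema
+ # with `table_cast`, so the result benefits from the usual `map` caching.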
+ schema = features.arrow_schema
+ format = self.format
+ dataset = self.with_format("arrow")
+ # use a picklable partial over `table_cast` (rather than a lambda) so the mapped function can be serialized, e.g. for multiprocessing on Windows
+ dataset = dataset.map(
+ partial(table_cast, schema=schema),
+ batched=True,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ num_proc=num_proc,
+ features=features,
+ desc="Casting the dataset",
+ )
+ dataset = dataset.with_format(**format)
+ return dataset
+
+ @fingerprint_transform(inplace=False)
+ def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`FeatureType`):
+ Target feature.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
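+ # Features that define `decode_example` (e.g. `Image` or `Audio`) can be swapped in with a direct
+ # Arrow schema cast; any other feature goes through the generic `cast`, which rewrites the data
+ # via `map`.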
+ if hasattr(feature, "decode_example"):
+ dataset = copy.deepcopy(self)
+ dataset._info.features[column] = feature
+ dataset._fingerprint = new_fingerprint
+ dataset._data = dataset._data.cast(dataset.features.arrow_schema)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ return dataset
+ else:
+ features = self.features
+ features[column] = feature
+ return self.cast(features)
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+
+ You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method
+ doesn't copy the data of the remaining columns and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.remove_columns('label')
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
+ Dataset({
+ features: [],
+ num_rows: 0
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ for column_name in column_names:
+ del dataset._info.features[column_name]
+
+ dataset._data = dataset._data.drop(column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_column(
+ self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
+ ) -> "Dataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_column('label', 'label_new')
+ Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if original_column_name not in dataset._data.column_names:
+ raise ValueError(
+ f"Original column name {original_column_name} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if new_column_name in dataset._data.column_names:
+ raise ValueError(
+ f"New column name {new_column_name} already in the dataset. "
+ f"Please choose a column name which is not already in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if not new_column_name:
+ raise ValueError("New column name is empty.")
+
+ def rename(columns):
+ return [new_column_name if col == original_column_name else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ new_column_name if col == original_column_name else col: feature
+ for col, feature in self._info.features.items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+
+ extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
+ if extra_columns:
+ raise ValueError(
+ f"Original column names {extra_columns} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
+ if number_of_duplicates_in_new_columns != 0:
+ raise ValueError(
+ "New column names must all be different, but this column mapping "
+ f"has {number_of_duplicates_in_new_columns} duplicates"
+ )
+
+ empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
+ if empty_new_columns:
+ raise ValueError(f"New column names {empty_new_columns} are empty.")
+
+ def rename(columns):
+ return [column_mapping[col] if col in column_mapping else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping else col: feature
+ for col, feature in (self._info.features or {}).items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform. If `None`,
+ the new fingerprint is computed using a hash of the previous
+ fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object which only consists of
+ selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select_columns(['text'])
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Current columns in the dataset: "
+ f"{self._data.column_names}."
+ )
+
+ dataset = copy.deepcopy(self)
+ dataset._data = dataset._data.select(column_names)
+ dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def __len__(self):
+ """Number of rows in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.__len__()
+ 1066
+ ```
+ """
+ return self.num_rows
+
+ def __iter__(self):
+ """Iterate through the examples.
+
+ If a formatting is set with [`Dataset.set_format`], rows will be returned with the
+ selected format.
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
+ for pa_subtable in table_iter(self.data, batch_size=batch_size):
+ for i in range(pa_subtable.num_rows):
+ pa_subtable_ex = pa_subtable.slice(i, 1)
+ formatted_output = format_table(
+ pa_subtable_ex,
+ 0,
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_output
+ else:
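+ # With an indices mapping, fall back to row-by-row lookups through `_getitem`, since the mapping
+ # prevents contiguous Arrow batch iteration.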
+ for i in range(self.num_rows):
+ yield self._getitem(
+ i,
+ )
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ If a formatting is set with [`~datasets.Dataset.set_format`], rows will be returned with the
+ selected format.
+
+ Args:
+ batch_size (`int`):
+ Size of each batch to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be dropped.
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
+ formatted_batch = format_table(
+ pa_subtable,
+ range(pa_subtable.num_rows),
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_batch
+ else:
+ num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
+ for i in range(0, num_rows, batch_size):
+ yield self._getitem(
+ slice(i, i + batch_size),
+ )
+
+ def __repr__(self):
+ return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
+
+ @property
+ def format(self):
+ return {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self.column_names if self._format_columns is None else self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+ """
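+ # A hedged usage sketch (illustrative only, not from the docstring): temporarily view a column as
+ # NumPy arrays and restore the previous format when the block exits:
+ #
+ #     with ds.formatted_as(type="numpy", columns=["label"]):
+ #         labels = ds["label"]  # NumPy array inside the block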
+ old_format_type = self._format_type
+ old_format_kwargs = self._format_kwargs
+ old_format_columns = self._format_columns
+ old_output_all_columns = self._output_all_columns
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
+
+ @fingerprint_transform(inplace=True)
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+ gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted according to:
+
+ ```
+ new formatted columns = (all columns - previously unformatted columns)
+ ```
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['text', 'label'])
+ >>> ds.format
+ {'type': 'numpy',
+ 'format_kwargs': {},
+ 'columns': ['text', 'label'],
+ 'output_all_columns': False}
+ ```
+ """
+ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format)
+
+ # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
+ type = get_format_type_from_alias(type)
+ get_formatter(type, features=self._info.features, **format_kwargs)
+
+ # Check filter column
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, tuple):
+ columns = list(columns)
+ if columns is not None:
+ missing_columns = set(columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+ if columns is not None:
+ columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
+
+ self._format_type = type
+ self._format_kwargs = format_kwargs
+ self._format_columns = columns
+ self._output_all_columns = output_all_columns
+ logger.debug(
+ "Set __getitem__(key) output type to %s for %s columns "
+ " (when key is int or slice) and %s output other (un-formatted) columns.",
+ "python objects" if type is None else type,
+ "no" if columns is None else str(columns),
+ "do" if output_all_columns else "don't",
+ )
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
+ >>> def encode(batch):
+ ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
+ >>> ds.set_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1]),
+ 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
+ 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
+ 5637, 1998, 11690, 2336, 1012, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0])}
+ ```
+ """
+ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keyword arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
+ >>> ds = ds.with_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
+ 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
+ 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ @deprecated()
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset":
+ """
+ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates).
+
+ Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting.
+
+ Args:
+ task (`Union[str, TaskTemplate]`):
+ The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include:
+
+ - `"text-classification"`
+ - `"question-answering"`
+
+ If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates).
+ id (`int`, defaults to `0`):
+ The id required to unambiguously identify the task template when multiple task templates of the same type are supported.
+ """
+ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD
+ if isinstance(task, str):
+ tasks = [template.task for template in (self.info.task_templates or [])]
+ compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]
+ if not compatible_templates:
+ raise ValueError(
+ f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}"
+ )
+
+ if not 0 <= id < len(compatible_templates):
+ templates_list_str = "\n".join(
+ f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates)
+ )
+ raise ValueError(
+ f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}"
+ )
+ template = compatible_templates[id]
+ elif isinstance(task, TaskTemplate):
+ template = task
+ else:
+ raise ValueError(
+ f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}."
+ )
+ template = template.align_with_features(self.info.features)
+ column_mapping = template.column_mapping
+ columns_to_drop = [column for column in self.column_names if column not in column_mapping]
+ dataset = self.remove_columns(columns_to_drop)
+ dataset = dataset.rename_columns(column_mapping)
+ # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
+ dataset.info.task_templates = None
+ dataset = dataset.cast(features=template.features)
+ return dataset
+
+ def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
+ """
+ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
+ """
+ if isinstance(key, bool):
+ raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
+ format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
+ format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
+ output_all_columns = (
+ kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
+ )
+ format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
+ format_kwargs = format_kwargs if format_kwargs is not None else {}
+ formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
+ pa_subtable = query_table(self._data, key, indices=self._indices)
+ formatted_output = format_table(
+ pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
+ )
+ return formatted_output
+
+ @overload
+ def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
+ ...
+
+ @overload
+ def __getitem__(self, key: str) -> List: # noqa: F811
+ ...
+
+ def __getitem__(self, key): # noqa: F811
+ """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
+ return self._getitem(key)
+
+ def __getitems__(self, keys: List) -> List:
+ """Can be used to get a batch using a list of integer indices."""
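+ # Batched fetching hook (used e.g. by PyTorch's default data fetcher when a dataset defines
+ # `__getitems__`): a single columnar lookup is split back into a list of per-example dicts.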
+ batch = self.__getitem__(keys)
+ n_examples = len(batch[next(iter(batch))])
+ return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
+
+ def cleanup_cache_files(self) -> int:
+ """Clean up all cache files in the dataset cache directory, except the currently used cache file if there is
+ one.
+
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Returns:
+ `int`: Number of removed files.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cleanup_cache_files()
+ 10
+ ```
+ """
+ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
+ if not current_cache_files:
+ return 0
+ cache_directory = os.path.dirname(current_cache_files[0])
+ logger.info(f"Listing files in {cache_directory}")
+ files: List[str] = os.listdir(cache_directory)
+ files_to_remove = []
+ for f_name in files:
+ full_name = os.path.abspath(os.path.join(cache_directory, f_name))
+ if f_name.startswith("cache-") and f_name.endswith(".arrow"):
+ if full_name in current_cache_files:
+ logger.info(f"Keeping currently used cache file at {full_name}")
+ continue
+ files_to_remove.append(full_name)
+ for file_path in files_to_remove:
+ logger.info(f"Removing {file_path}")
+ os.remove(file_path)
+ return len(files_to_remove)
+
+ def _get_cache_file_path(self, fingerprint):
+ if is_caching_enabled() and self.cache_files:
+ cache_file_name = "cache-" + fingerprint + ".arrow"
+ cache_directory = os.path.dirname(self.cache_files[0]["filename"])
+ else:
+ cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
+ cache_directory = get_temporary_cache_files_directory()
+ cache_file_path = os.path.join(cache_directory, cache_file_name)
+ return cache_file_path
+
+ @transmit_tasks
+ @transmit_format
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """
+ Apply a function to all the examples in the table (individually or in batches) and update the table.
+ If your function returns a column that already exists, then it overwrites it.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`): Function with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ Higher value makes the processing do fewer lookups, lower value consume less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix is added at the end of the base name of each per-process cache file.
+ Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is `"processed.arrow"`, then for
+ `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds[0:3]["text"]
+ ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
+ 'Review: the soundtrack alone is worth the price of admission .',
+ 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
+
+ # process a batch of examples
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
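+
+ For a batched transform that also drops a column, a minimal sketch building on the `ds` from the example above could be (the `text_len` column name is only illustrative):
+
+ ```py
+ >>> ds = ds.map(
+ ...     lambda batch: {"text_len": [len(t) for t in batch["text"]]},
+ ...     batched=True,
+ ...     remove_columns=["text"],
+ ... )
+ ```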
+ """
+ if keep_in_memory and cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
+
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError("num_proc must be an integer > 0.")
+
+ # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
+ if len(self) == 0:
+ if self._indices is not None: # empty indices mapping
+ self = Dataset(
+ self.data.slice(0, 0),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ if remove_columns:
+ return self.remove_columns(remove_columns)
+ else:
+ return self
+
+ if function is None:
+ function = lambda x: x # noqa: E731
+
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ if input_columns is not None:
+ missing_columns = set(input_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+
+ if remove_columns is not None:
+ missing_columns = set(remove_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ if num_proc is not None and num_proc > len(self):
+ num_proc = len(self)
+ logger.warning(
+ f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
+ )
+
+ dataset_kwargs = {
+ "shard": self,
+ "function": function,
+ "with_indices": with_indices,
+ "with_rank": with_rank,
+ "input_columns": input_columns,
+ "batched": batched,
+ "batch_size": batch_size,
+ "drop_last_batch": drop_last_batch,
+ "remove_columns": remove_columns,
+ "keep_in_memory": keep_in_memory,
+ "writer_batch_size": writer_batch_size,
+ "features": features,
+ "disable_nullable": disable_nullable,
+ "fn_kwargs": fn_kwargs,
+ }
+
+ if new_fingerprint is None:
+ # we create a unique hash from the function,
+ # current dataset file and the mapping args
+ transform = format_transform_for_fingerprint(Dataset._map_single)
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
+ kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
+ new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ validate_fingerprint(new_fingerprint)
+ dataset_kwargs["new_fingerprint"] = new_fingerprint
+
+ if self.cache_files:
+ if cache_file_name is None:
+ cache_file_name = self._get_cache_file_path(new_fingerprint)
+ dataset_kwargs["cache_file_name"] = cache_file_name
+
+ def load_processed_shard_from_cache(shard_kwargs):
+ """Load a processed shard from cache if it exists, otherwise throw an error."""
+ shard = shard_kwargs["shard"]
+ # Check if we've already cached this computation (indexed by a hash)
+ if shard_kwargs["cache_file_name"] is not None:
+ if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
+ info = shard.info.copy()
+ info.features = features
+ info.task_templates = None
+ return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
+ raise NonExistentDatasetError
+
+ num_shards = num_proc if num_proc is not None else 1
+ if batched and drop_last_batch:
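+ # each shard drops its own trailing partial batch, so the progress-bar total is the per-shard length truncated to a multiple of batch_size, summed over all shards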
+ pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
+ else:
+ pbar_total = len(self)
+
+ shards_done = 0
+ if num_proc is None or num_proc == 1:
+ transformed_dataset = None
+ try:
+ transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
+ logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
+ except NonExistentDatasetError:
+ pass
+ if transformed_dataset is None:
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=desc or "Map",
+ ) as pbar:
+ for rank, done, content in Dataset._map_single(**dataset_kwargs):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_dataset = content
+ else:
+ pbar.update(content)
+ assert transformed_dataset is not None, "Failed to retrieve the result from map"
+ # update fingerprint if the dataset changed
+ if transformed_dataset._fingerprint != self._fingerprint:
+ transformed_dataset._fingerprint = new_fingerprint
+ return transformed_dataset
+ else:
+
+ def format_cache_file_name(
+ cache_file_name: Optional[str],
+ rank: Union[int, Literal["*"]], # noqa: F722
+ ) -> Optional[str]:
+ if not cache_file_name:
+ return cache_file_name
+ sep = cache_file_name.rindex(".")
+ base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
+ if isinstance(rank, int):
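+ # e.g. with the default suffix, rank=1 and num_proc=4 turn "processed.arrow" into "processed_00001_of_00004.arrow"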
+ cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
+ logger.info(f"Process #{rank} will write at {cache_file_name}")
+ else:
+ cache_file_name = (
+ base_name
+ + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ + extension
+ )
+ return cache_file_name
+
+ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
+ new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
+ validate_fingerprint(new_fingerprint)
+ return new_fingerprint
+
+ prev_env = deepcopy(os.environ)
+ # check if parallelism is off
+ # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
+ if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
+ "",
+ "off",
+ "false",
+ "f",
+ "no",
+ "n",
+ "0",
+ ):
+ logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ shards = [
+ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
+ for rank in range(num_proc)
+ ]
+ kwargs_per_job = [
+ {
+ **dataset_kwargs,
+ "shard": shards[rank],
+ "cache_file_name": format_cache_file_name(cache_file_name, rank),
+ "rank": rank,
+ "offset": sum(len(s) for s in shards[:rank]),
+ "new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
+ }
+ for rank in range(num_shards)
+ ]
+
+ transformed_shards = [None] * num_shards
+ for rank in range(num_shards):
+ try:
+ transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
+ kwargs_per_job[rank] = None
+ except NonExistentDatasetError:
+ pass
+
+ kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
+
+ # We try to create a pool with as many workers as there are shards not yet cached.
+ if kwargs_per_job:
+ if len(kwargs_per_job) < num_shards:
+ logger.info(
+ f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
+ )
+ with Pool(len(kwargs_per_job)) as pool:
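+ # the forked workers inherited the possibly modified environment; restore the parent's original one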
+ os.environ = prev_env
+ logger.info(f"Spawning {num_proc} processes")
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=(desc or "Map") + f" (num_proc={num_proc})",
+ ) as pbar:
+ for rank, done, content in iflatmap_unordered(
+ pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_shards[rank] = content
+ else:
+ pbar.update(content)
+ # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
+ for kwargs in kwargs_per_job:
+ del kwargs["shard"]
+ else:
+ logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
+ assert (
+ None not in transformed_shards
+ ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
+ logger.info(f"Concatenating {num_proc} shards")
+ result = _concatenate_map_style_datasets(transformed_shards)
+ # update fingerprint if the dataset changed
+ if any(
+ transformed_shard._fingerprint != shard._fingerprint
+ for transformed_shard, shard in zip(transformed_shards, shards)
+ ):
+ result._fingerprint = new_fingerprint
+ else:
+ result._fingerprint = self._fingerprint
+ return result
+
+ @staticmethod
+ def _map_single(
+ shard: "Dataset",
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ new_fingerprint: Optional[str] = None,
+ rank: Optional[int] = None,
+ offset: int = 0,
+ ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if function does update examples).
+
+ Args:
+ shard (`datasets.Dataset`): Dataset to map the transform on.
+ function (`Callable`): with one of the following signature:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover, if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, it defaults to the identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`): Provide batch of examples to `function`
+ batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
+ fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
+ new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ rank (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing.
+ offset (`int`, defaults to `0`): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
+ """
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ # If we do batch computation but no batch size is provided, default to the full dataset
+ if batched and (batch_size is None or batch_size <= 0):
+ batch_size = shard.num_rows
+
+ # We set this variable to True after processing the first example/batch in
+ # `apply_function_on_filtered_inputs` if the map function returns a dict.
+ # If set to False, no new arrow table will be created
+
+ update_data = None
+
+ format_kwargs = shard._format_kwargs.copy()
+ # Lazy formatting is only available for the default format (None/python)
+ if not input_columns and shard._format_type is None:
+ format_kwargs["lazy"] = True
+ input_formatter = get_formatter(
+ shard._format_type,
+ features=shard.features,
+ **format_kwargs,
+ )
+
+ class NumExamplesMismatchError(Exception):
+ pass
+
+ def validate_function_output(processed_inputs, indices):
+ """Validate output of the map function."""
+ if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)):
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
+ )
+ elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
+ allowed_batch_return_types = (list, np.ndarray, pd.Series)
+ if config.POLARS_AVAILABLE and "polars" in sys.modules:
+ import polars as pl
+
+ allowed_batch_return_types += (pl.Series, pl.DataFrame)
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ allowed_batch_return_types += (tf.Tensor,)
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ allowed_batch_return_types += (torch.Tensor,)
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ allowed_batch_return_types += (jnp.ndarray,)
+ all_dict_values_are_lists = all(
+ isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
+ )
+ if all_dict_values_are_lists is False:
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
+ )
+
+ def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
+ """Utility to apply the function on a selection of columns."""
+ nonlocal update_data
+ inputs = format_table(
+ pa_inputs,
+ 0 if not batched else range(pa_inputs.num_rows),
+ format_columns=input_columns,
+ formatter=input_formatter,
+ )
+ fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
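+ # when processing a shard in a worker, shift the indices by the shard's offset so `function` receives indices into the full dataset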
+ if offset == 0:
+ effective_indices = indices
+ else:
+ effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
+ additional_args = ()
+ if with_indices:
+ additional_args += (effective_indices,)
+ if with_rank:
+ additional_args += (rank,)
+ processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
+ if isinstance(processed_inputs, LazyDict):
+ processed_inputs = {
+ k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
+ }
+ returned_lazy_dict = True
+ else:
+ returned_lazy_dict = False
+ if update_data is None:
+ # Check if the function returns updated examples
+ update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
+ validate_function_output(processed_inputs, indices)
+ if not update_data:
+ return None # Nothing to update, let's move on
+ if shard._format_type or input_columns:
+ # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
+ inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
+ elif isinstance(inputs, LazyDict):
+ inputs_to_merge = {
+ k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
+ }
+ else:
+ inputs_to_merge = inputs
+ if remove_columns is not None:
+ for column in remove_columns:
+ # `function` can modify input in-place causing column to be already removed.
+ if column in inputs_to_merge:
+ inputs_to_merge.pop(column)
+ if returned_lazy_dict and column in processed_inputs:
+ processed_inputs.pop(column)
+ if check_same_num_examples:
+ input_num_examples = len(pa_inputs)
+ processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
+ if input_num_examples != processed_inputs_num_examples:
+ raise NumExamplesMismatchError()
+ if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
+ # The .map() transform *updates* the dataset:
+ # the output dictionary contains both the input data and the output data.
+ # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
+ return {**inputs_to_merge, **processed_inputs}
+ else:
+ return processed_inputs
+
+ def init_buffer_and_writer():
+ # Prepare output buffer and batched writer in memory or on file if we update the table
+ writer_features = features
+ if writer_features is None:
+ writer_features = shard.features
+ update_features = True
+ else:
+ update_features = False
+ if keep_in_memory or cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ features=writer_features,
+ stream=buf_writer,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching processed dataset at {cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
+ writer = ArrowWriter(
+ features=writer_features,
+ path=tmp_file.name,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ return buf_writer, writer, tmp_file
+
+ num_examples_progress_update = 0
+ # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
+ buf_writer, writer, tmp_file = None, None, None
+
+ # Check if Polars is available and import it if so
+ if config.POLARS_AVAILABLE and "polars" in sys.modules:
+ import polars as pl
+
+ # Optionally initialize the writer as a context manager
+ with contextlib.ExitStack() as stack:
+ try:
+ arrow_formatted_shard = shard.with_format("arrow")
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ if not batched:
+ shard_iterable = enumerate(arrow_formatted_shard)
+ else:
+ num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
+ shard_iterable = zip(
+ range(0, num_rows, batch_size),
+ arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
+ )
+ if not batched:
+ _time = time.time()
+ for i, example in shard_iterable:
+ example = apply_function_on_filtered_inputs(example, i, offset=offset)
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(example, pa.Table):
+ writer.write_row(example)
+ elif isinstance(example, pd.DataFrame):
+ writer.write_row(pa.Table.from_pandas(example))
+ elif (
+ config.POLARS_AVAILABLE
+ and "polars" in sys.modules
+ and isinstance(example, pl.DataFrame)
+ ):
+ writer.write_row(example.to_arrow())
+ else:
+ writer.write(example)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ else:
+ _time = time.time()
+ for i, batch in shard_iterable:
+ num_examples_in_batch = len(batch)
+ indices = list(
+ range(*(slice(i, i + batch_size).indices(shard.num_rows)))
+ ) # Something simpler?
+ try:
+ batch = apply_function_on_filtered_inputs(
+ batch,
+ indices,
+ check_same_num_examples=len(shard.list_indexes()) > 0,
+ offset=offset,
+ )
+ except NumExamplesMismatchError:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index()` to remove your index and then re-add it."
+ ) from None
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(batch, pa.Table):
+ writer.write_table(batch)
+ elif isinstance(batch, pd.DataFrame):
+ writer.write_table(pa.Table.from_pandas(batch))
+ elif (
+ config.POLARS_AVAILABLE and "polars" in sys.modules and isinstance(batch, pl.DataFrame)
+ ):
+ writer.write_table(batch.to_arrow())
+ else:
+ writer.write_batch(batch)
+ num_examples_progress_update += num_examples_in_batch
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ if update_data and writer is not None:
+ writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ yield rank, False, num_examples_progress_update
+ if update_data:
+ if writer is not None:
+ writer.finalize()
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ yield rank, False, num_examples_progress_update
+ if update_data and tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(cache_file_name, 0o666 & ~umask)
+
+ if update_data:
+ # Create new Dataset from buffer or file
+ info = shard.info.copy()
+ info.features = writer._features
+ info.task_templates = None
+ if buf_writer is None:
+ yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
+ else:
+ yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
+ else:
+ yield rank, True, shard
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
+ )
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if
+ `batched = True`. If `batched = False`, one example per batch is passed to `function`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each per-process cache file.
+ For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
+ the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
+ `_{rank:05d}_of_{num_proc:05d}`).
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
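+
+ As a sketch of the batched variant, which returns one boolean per example in the batch and yields the same result as above:
+
+ ```py
+ >>> ds.filter(lambda batch: [label == 1 for label in batch["label"]], batched=True)
+ Dataset({
+     features: ['text', 'label'],
+     num_rows: 533
+ })
+ ```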
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ if function is None:
+ function = lambda x: True # noqa: E731
+
+ if len(self) == 0:
+ return self
+
+ indices = self.map(
+ function=partial(
+ get_indices_from_mask_function,
+ function,
+ batched,
+ with_indices,
+ with_rank,
+ input_columns,
+ self._indices,
+ ),
+ with_indices=True,
+ with_rank=True,
+ features=Features({"indices": Value("uint64")}),
+ batched=True,
+ batch_size=batch_size,
+ remove_columns=self.column_names,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ suffix_template=suffix_template,
+ new_fingerprint=new_fingerprint,
+ input_columns=input_columns,
+ desc=desc or "Filter",
+ )
+ new_dataset = copy.deepcopy(self)
+ new_dataset._indices = indices.data
+ new_dataset._fingerprint = new_fingerprint
+ return new_dataset
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
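+
+ Example (an illustrative sketch; shuffling first is what creates the indices mapping that gets flattened):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.shuffle(seed=42)  # creates an indices mapping
+ >>> ds = ds.flatten_indices()  # rewrites the rows contiguously and drops the mapping
+ ```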
+ """
+
+ return self.map(
+ batched=True, # for speed
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ new_fingerprint=new_fingerprint,
+ desc="Flattening the indices",
+ num_proc=num_proc,
+ )
+
+ def _new_dataset_with_indices(
+ self,
+ indices_cache_file_name: Optional[str] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
+ current Dataset.
+ """
+
+ if indices_cache_file_name is None and indices_buffer is None:
+ raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
+
+ if fingerprint is None:
+ raise ValueError("please specify a fingerprint for the dataset with indices")
+
+ if indices_cache_file_name is not None:
+ indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
+ else:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+
+ # Return new Dataset object
+ # don't forget to copy the objects
+ return Dataset(
+ self._data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def select(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+
+ Args:
+ indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
+ Range, list or 1D-array of integer indices for indexing.
+ If the indices correspond to a contiguous range, the Arrow table is simply sliced.
+ However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
+ but still faster than recreating an Arrow table made of the requested rows.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
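+
+ A non-contiguous selection, which creates an indices mapping instead of slicing the table, could look like this (the indices are only illustrative):
+
+ ```py
+ >>> ds.select([0, 2, 4, 6])
+ Dataset({
+     features: ['text', 'label'],
+     num_rows: 4
+ })
+ ```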
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # If indices is a PyArrow array, we convert to NumPy
+ if isinstance(indices, (pa.Array, pa.ChunkedArray)):
+ indices = indices.to_numpy().astype(np.int64)
+
+ # Convert generator objects to lists
+ if isinstance(indices, Iterator):
+ indices = list(indices)
+
+ # If the indices are contiguous, simply slice the arrow table
+ if isinstance(indices, range):
+ if _is_range_contiguous(indices) and indices.start >= 0:
+ start, length = indices.start, indices.stop - indices.start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+ else:
+ try:
+ start = next(iter(indices))
+ except StopIteration:
+ # if `indices` is an empty iterable, we return an empty dataset
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+ if start >= 0:
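+ # detect a contiguous run start, start + 1, start + 2, ... so the table can be sliced instead of building an indices mapping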
+ counter_from_start = itertools.count(start=start)
+ if all(i == j for i, j in zip(indices, counter_from_start)):
+ length = next(counter_from_start) - start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+
+ # If not contiguous, we need to create a new indices mapping
+ return self._select_with_indices_mapping(
+ indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def _select_contiguous(
+ self,
+ start: int,
+ length: int,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows from a contiguous slice of data.
+ The slice is defined by its start index and its length.
+
+ Args:
+ start (`int`): start index.
+ length (`int`): length of the slice to select.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_contiguous(0, 4)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ _check_valid_indices_value(start, len(self))
+ _check_valid_indices_value(start + length - 1, len(self))
+ if self._indices is None or length == 0:
+ return Dataset(
+ self.data.slice(start, length),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ else:
+ return Dataset(
+ self.data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=self._indices.slice(start, length),
+ fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def _select_with_indices_mapping(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+ The new dataset is made by creating a new indices mapping on top of the main arrow table.
+
+ Args:
+ indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
+ keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_with_indices_mapping(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Prepare the writer for our indices arrow table
+ if keep_in_memory or indices_cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching indices mapping at {indices_cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False)
+ writer = ArrowWriter(
+ path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+
+ indices = indices if isinstance(indices, list) else list(indices)
+
+ size = len(self)
+ if indices:
+ _check_valid_indices_value(int(max(indices)), size=size)
+ _check_valid_indices_value(int(min(indices)), size=size)
+ else:
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+
+ indices_array = pa.array(indices, type=pa.uint64())
+ # Check if we need to convert indices
+ if self._indices is not None:
+ indices_array = self._indices.column(0).take(indices_array)
+
+ indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
+
+ with writer:
+ try:
+ writer.write_table(indices_table)
+ writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ if tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, indices_cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(indices_cache_file_name, 0o666 & ~umask)
+
+ # Return new Dataset object
+ if buf_writer is None:
+ return self._new_dataset_with_indices(
+ indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
+ )
+ else:
+ return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
+
+ def skip(self, n: int) -> "Dataset":
+ """
+ Create a new [`Dataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ return self.select(range(n, len(self)))
+
+ def take(self, n: int) -> "Dataset":
+ """
+ Create a new [`Dataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ return self.select(range(n))
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
+ def sort(
+ self,
+ column_names: Union[str, Sequence_[str]],
+ reverse: Union[bool, Sequence_[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting selected in `{quicksort, mergesort, heapsort, stable}`,
+ The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+
+
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ sorted indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='validation')
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Deprecation warning
+ if kind != "deprecated":
+ warnings.warn(
+ "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+
+ # Check proper format of and for duplicates in column_names
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ # Check proper format and length of reverse
+ if not isinstance(reverse, bool):
+ if len(reverse) != len(column_names):
+ raise ValueError(
+ "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
+ )
+ else:
+ reverse = [reverse] * len(column_names)
+
+ # Check whether column name(s) exist in dataset
+ for column in column_names:
+ if not isinstance(column, str) or column not in self._data.column_names:
+ raise ValueError(
+ f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
+ )
+
+ # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
+ if null_placement not in ["at_start", "at_end"]:
+ if null_placement == "first":
+ null_placement = "at_start"
+ elif null_placement == "last":
+ null_placement = "at_end"
+ else:
+ raise ValueError(
+ f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ sort_table = query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ )
+
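+ # pyarrow.compute.sort_indices expects (column, order) pairs, with order being "ascending" or "descending"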
+ sort_keys = [
+ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
+ ]
+
+ indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
+ )
+ def shuffle(
+ self,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new Dataset where the rows are shuffled.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy random `Generator` to use, or a seed to initialize NumPy's default random generator (PCG64).
+
+ Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
+ However, as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
+ This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
+ To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
+ This may take a lot of time depending on the size of your dataset, though:
+
+ ```python
+ my_dataset[0] # fast
+ my_dataset = my_dataset.shuffle(seed=42)
+ my_dataset[0] # up to 10x slower
+ my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
+ my_dataset[0] # fast again
+ ```
+
+ In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
+ It only shuffles the shard order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
+
+ ```python
+ my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
+ for example in my_iterable_dataset: # fast
+ pass
+
+ shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
+
+ for example in shuffled_iterable_dataset: # as fast as before
+ pass
+ ```
+
+ Args:
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the shuffled indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ shuffled indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds['label'][:10]
+ [1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
+ ```
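+
+ Passing an explicit NumPy generator instead of a seed is an equivalent sketch:
+
+ ```py
+ >>> import numpy as np
+ >>> rng = np.random.default_rng(42)
+ >>> shuffled_ds = ds.shuffle(generator=rng)
+ ```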
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if seed is not None and generator is not None:
+ raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
+
+ if generator is not None and not isinstance(generator, np.random.Generator):
+ raise ValueError("The provided generator must be an instance of numpy.random.Generator")
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None:
+ if seed is None:
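+ # draw a seed from the global NumPy RNG state and advance it by one step, so successive un-seeded shuffles differ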
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ permutation = generator.permutation(len(self))
+
+ return self.select(
+ indices=permutation,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False,
+ randomized_function=True,
+ fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
+ ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
+ )
+ def train_test_split(
+ self,
+ test_size: Union[float, int, None] = None,
+ train_size: Union[float, int, None] = None,
+ shuffle: bool = True,
+ stratify_by_column: Optional[str] = None,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ train_indices_cache_file_name: Optional[str] = None,
+ test_indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ train_new_fingerprint: Optional[str] = None,
+ test_new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
+ Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
+
+ This method is similar to scikit-learn `train_test_split`.
+
+ Args:
+ test_size (`int` or `float`, *optional*):
+ Size of the test split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
+ If `int`, represents the absolute number of test samples.
+ If `None`, the value is set to the complement of the train size.
+ If `train_size` is also `None`, it will be set to `0.25`.
+ train_size (`int` or `float`, *optional*):
+ Size of the train split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
+ If `int`, represents the absolute number of train samples.
+ If `None`, the value is automatically set to the complement of the test size.
+ shuffle (`bool`, *optional*, defaults to `True`):
+ Whether or not to shuffle the data before splitting.
+ stratify_by_column (`str`, *optional*, defaults to `None`):
+ The column name of labels to be used to perform stratified split of data.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the split indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the splits indices
+ can be identified, use it instead of recomputing.
+ train_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ train split indices instead of the automatically generated cache file name.
+ test_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ test split indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ train_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the train set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ test_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the test set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.train_test_split(test_size=0.2, shuffle=True)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 852
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 214
+ })
+ })
+
+ # set a seed
+ >>> ds = ds.train_test_split(test_size=0.2, seed=42)
+
+ # stratified split
+ >>> ds = load_dataset("imdb",split="train")
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 25000
+ })
+ >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 20000
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 5000
+ })
+ })
+ ```
+ """
+ from .dataset_dict import DatasetDict # import here because of circular dependency
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return DatasetDict({"train": self, "test": self})
+
+ if test_size is None and train_size is None:
+ test_size = 0.25
+
+ # Safety checks similar to scikit-learn's ones.
+ # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
+ n_samples = len(self)
+ if (
+ isinstance(test_size, int)
+ and (test_size >= n_samples or test_size <= 0)
+ or isinstance(test_size, float)
+ and (test_size <= 0 or test_size >= 1)
+ ):
+ raise ValueError(
+ f"test_size={test_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if (
+ isinstance(train_size, int)
+ and (train_size >= n_samples or train_size <= 0)
+ or isinstance(train_size, float)
+ and (train_size <= 0 or train_size >= 1)
+ ):
+ raise ValueError(
+ f"train_size={train_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if train_size is not None and not isinstance(train_size, (int, float)):
+ raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
+ if test_size is not None and not isinstance(test_size, (int, float)):
+ raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
+
+ if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
+ raise ValueError(
+ f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
+ " range. Reduce test_size and/or train_size."
+ )
+
+ if isinstance(test_size, float):
+ n_test = ceil(test_size * n_samples)
+ elif isinstance(test_size, int):
+ n_test = float(test_size)
+
+ if isinstance(train_size, float):
+ n_train = floor(train_size * n_samples)
+ elif isinstance(train_size, int):
+ n_train = float(train_size)
+
+ if train_size is None:
+ n_train = n_samples - n_test
+ elif test_size is None:
+ n_test = n_samples - n_train
+
+ if n_train + n_test > n_samples:
+ raise ValueError(
+ f"The sum of train_size and test_size = {n_train + n_test}, "
+ "should be smaller than the number of "
+ f"samples {n_samples}. Reduce test_size and/or "
+ "train_size."
+ )
+
+ n_train, n_test = int(n_train), int(n_test)
+
+ if n_train == 0:
+ raise ValueError(
+ f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
+ "resulting train set will be empty. Adjust any of the "
+ "aforementioned parameters."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None and shuffle is True:
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+
+ if train_indices_cache_file_name is None:
+ train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
+ if test_indices_cache_file_name is None:
+ test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
+ if (
+ os.path.exists(train_indices_cache_file_name)
+ and os.path.exists(test_indices_cache_file_name)
+ and load_from_cache_file
+ ):
+ logger.info(
+ f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
+ )
+ return DatasetDict(
+ {
+ "train": self._new_dataset_with_indices(
+ fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
+ ),
+ "test": self._new_dataset_with_indices(
+ fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
+ ),
+ }
+ )
+ if not shuffle:
+ if stratify_by_column is not None:
+ raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
+ train_indices = np.arange(n_train)
+ test_indices = np.arange(n_train, n_train + n_test)
+ else:
+ # stratified partition
+ if stratify_by_column is not None:
+ if stratify_by_column not in self._info.features.keys():
+ raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
+ if not isinstance(self._info.features[stratify_by_column], ClassLabel):
+ raise ValueError(
+ f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
+ )
+ try:
+ train_indices, test_indices = next(
+ stratified_shuffle_split_generate_indices(
+ self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
+ )
+ )
+ except Exception as error:
+ if str(error) == "Minimum class count error":
+ raise ValueError(
+ f"The least populated class in {stratify_by_column} column has only 1"
+ " member, which is too few. The minimum"
+ " number of groups for any class cannot"
+ " be less than 2."
+ )
+ else:
+ raise error
+
+ # random partition
+ else:
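+ # The first n_test shuffled indices form the test split, the next n_train form the train split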
+ permutation = generator.permutation(len(self))
+ test_indices = permutation[:n_test]
+ train_indices = permutation[n_test : (n_test + n_train)]
+
+ train_split = self.select(
+ indices=train_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=train_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=train_new_fingerprint,
+ )
+ test_split = self.select(
+ indices=test_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=test_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=test_new_fingerprint,
+ )
+
+ return DatasetDict({"train": train_split, "test": test_split})
+
+ def shard(
+ self,
+ num_shards: int,
+ index: int,
+ contiguous: bool = False,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "Dataset":
+ """Return the `index`-nth shard from dataset split into `num_shards` pieces.
+
+ This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
+ index mod `n = i`.
+
+ `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
+ so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
+ first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
+ `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
+ a dataset with the same order as the original.
+
+ Be sure to shard before using any randomizing operator (such as `shuffle`).
+ It is best if the shard operator is used early in the dataset pipeline.
+
+
+ Args:
+ num_shards (`int`):
+ How many shards to split the dataset into.
+ index (`int`):
+ Which shard to select and return.
+ contiguous (`bool`, defaults to `False`):
+ Whether to select contiguous blocks of indices for shards.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices of each shard instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 1066
+ })
+ >>> ds.shard(num_shards=2, index=0)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
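+
+ Contiguous shards can be concatenated back together in order, for example:
+
+ ```py
+ >>> from datasets import concatenate_datasets
+ >>> shards = [ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)]
+ >>> reassembled = concatenate_datasets(shards)
+ >>> len(reassembled) == len(ds)
+ True
+ ```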
+ """
+ if not 0 <= index < num_shards:
+ raise ValueError("index should be in [0, num_shards-1]")
+ if contiguous:
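+ # Contiguous split: the first `mod` shards get one extra row so that all len(self) rows are covered exactly once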
+ div = len(self) // num_shards
+ mod = len(self) % num_shards
+ start = div * index + min(index, mod)
+ end = start + div + (1 if index < mod else 0)
+ indices = range(start, end)
+ else:
+ indices = np.arange(index, len(self), num_shards)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ )
+
+ @deprecated()
+ def export(
+ self,
+ filename: str,
+ format: str = "tfrecord",
+ ):
+ """Writes the Arrow dataset to a TFRecord file.
+
+ The dataset must already be in `numpy` format (e.g. via `dataset.set_format("numpy")`). The records will be written with
+ keys from `dataset._format_columns`.
+
+ Args:
+ filename (`str`): The filename, including the `.tfrecord` extension, to write to.
+ format (`str`, optional, default `"tfrecord"`): The type of output file. Currently this is a no-op, as
+ TFRecords are the only option. This enables a more flexible function signature later.
+ """
+ try:
+ import tensorflow as tf # noqa: F401
+ except ImportError:
+ raise ImportError("Tensorflow needs to be installed to be able to write TFRecord files.")
+
+ # From https://www.tensorflow.org/tutorials/load_data/tfrecord
+ def _bytes_feature(values):
+ """Returns a bytes_list from a list of string / byte."""
+ return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
+
+ def _float_feature(values):
+ """Returns a float_list from a list of float / double."""
+ return tf.train.Feature(float_list=tf.train.FloatList(value=values))
+
+ def _int64_feature(values):
+ """Returns an int64_list from a list of bool / enum / int / uint."""
+ return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
+
+ def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature":
+ """Typechecks `values` and returns the corresponding tf.train.Feature."""
+ if isinstance(values, list):
+ if values and isinstance(values[0], str):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(f"values={values} is empty or contains items that cannot be serialized")
+ elif isinstance(values, np.ndarray):
+ if values.dtype == np.dtype(float):
+ return _float_feature(values)
+ elif values.dtype == np.int64:
+ return _int64_feature(values)
+ elif values.dtype == np.dtype(str) or (
+ values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)
+ ):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(
+ f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized"
+ )
+ elif hasattr(values, "dtype"):
+ if np.issubdtype(values.dtype, np.floating):
+ return _float_feature([values.item()])
+ elif np.issubdtype(values.dtype, np.integer):
+ return _int64_feature([values.item()])
+ elif np.issubdtype(values.dtype, str):
+ return _bytes_feature([values.item().encode()])
+ else:
+ raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized")
+ else:
+ raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized")
+
+ def serialize_example(ex):
+ feature = {key: _feature(value) for key, value in ex.items()}
+ example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
+ return example_proto.SerializeToString()
+
+ def tf_serialize_example(ex):
+ tf_string = tf.py_function(serialize_example, (ex,), tf.string)
+ return tf.reshape(tf_string, ())
+
+ def generator():
+ for ex in self:
+ yield serialize_example(ex)
+
+ if self._format_type != "numpy":
+ raise ValueError("Dataset format must be numpy before exporting")
+ if not filename.endswith(".tfrecord"):
+ raise ValueError("filename {filename} must end with .tfrecord")
+ tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
+ writer = tf.data.experimental.TFRecordWriter(filename)
+ logger.info(f"Writing TFRecord to {filename}")
+ writer.write(tf_dataset)
+ logger.info(f"Finished writing TFRecord to {filename}")
+ self = None # delete the dataset reference used by tf_dataset
+
+ def to_csv(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **to_csv_kwargs,
+ ) -> int:
+ """Exports the dataset to csv
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.csv`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.csv`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **to_csv_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
+
+ `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_csv("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetWriter
+
+ return CsvDatasetWriter(
+ self,
+ path_or_buf,
+ batch_size=batch_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ **to_csv_kwargs,
+ ).write()
+
+ def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
+ """Returns the dataset as a Python dict. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+
+ Deprecated in version 2.11.0: `batched` will be removed in version 3.0.0.
+ Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
+
+ batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `dict` or `Iterator[dict]`
+
+ Example:
+
+ ```py
+ >>> ds.to_dict()
+ ```
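+
+ For batched access, you can iterate over the dataset with [`Dataset.iter`], as suggested in the deprecation note above:
+
+ ```py
+ >>> for batch in ds.iter(batch_size=100):
+ ...     pass # with the default format, each batch is a dict of columns holding `batch_size` rows
+ ```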
+ """
+ if batched != "deprecated":
+ warnings.warn(
+ "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
+ FutureWarning,
+ )
+ else:
+ batched = False
+
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pydict()
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pydict()
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_list(self) -> list:
+ """Returns the dataset as a Python list.
+
+ Returns:
+ `list`
+
+ Example:
+
+ ```py
+ >>> ds.to_list()
+ ```
+ """
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pylist()
+
+ def to_json(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **to_json_kwargs,
+ ) -> int:
+ """Export the dataset to JSON Lines or JSON.
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.json`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.json`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **to_json_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
+
+ `index` defaults to `False` if `orient` is `"split"` or `"table"`.
+
+ If you would like to write the index, pass `index=True`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_json("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetWriter
+
+ return JsonDatasetWriter(
+ self,
+ path_or_buf,
+ batch_size=batch_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ **to_json_kwargs,
+ ).write()
+
+ def to_pandas(
+ self, batch_size: Optional[int] = None, batched: bool = False
+ ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
+ """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `pandas.DataFrame` or `Iterator[pandas.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_pandas()
+ ```
+ """
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_polars(
+ self,
+ batch_size: Optional[int] = None,
+ batched: bool = False,
+ schema_overrides: Optional[dict] = None,
+ rechunk: bool = True,
+ ) -> Union["pl.DataFrame", Iterator["pl.DataFrame"]]:
+ """Returns the dataset as a `polars.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ schema_overrides (`dict`, *optional*):
+ Support type specification or override of one or more columns; note that
+ any dtypes inferred from the schema param will be overridden.
+ rechunk (`bool`):
+ Make sure that all data is in contiguous memory. Defaults to `True`.
+ Returns:
+ `polars.DataFrame` or `Iterator[polars.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_polars()
+ ```
+ """
+ if config.POLARS_AVAILABLE:
+ import polars as pl
+
+ if not batched:
+ return pl.from_arrow(
+ query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices if self._indices is not None else None,
+ ),
+ schema_overrides=schema_overrides,
+ rechunk=rechunk,
+ )
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ pl.from_arrow(
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices if self._indices is not None else None,
+ ),
+ schema_overrides=schema_overrides,
+ rechunk=rechunk,
+ )
+ for offset in range(0, len(self), batch_size)
+ )
+ else:
+ raise ValueError("Polars needs to be installed to be able to return Polars dataframes.")
+
+ def to_parquet(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **parquet_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to parquet
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file (e.g. `file.parquet`), a remote URI (e.g. `hf://datasets/username/my_dataset_name/data.parquet`),
+ or a BinaryIO, where the dataset will be saved to in the specified format.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+ **parquet_writer_kwargs (additional keyword arguments):
+ Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_parquet("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetWriter
+
+ return ParquetDatasetWriter(
+ self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs
+ ).write()
+
+ def to_sql(
+ self,
+ name: str,
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ batch_size: Optional[int] = None,
+ **sql_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to a SQL database.
+
+ Args:
+ name (`str`):
+ Name of SQL table.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **sql_writer_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
+
+ `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+ Returns:
+ `int`: The number of records written.
+
+ Example:
+
+ ```py
+ >>> # con provided as a connection URI string
+ >>> ds.to_sql("data", "sqlite:///my_own_db.sql")
+ >>> # con provided as a sqlite3 connection object
+ >>> import sqlite3
+ >>> con = sqlite3.connect("my_own_db.sql")
+ >>> with con:
+ ... ds.to_sql("data", con)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.sql import SqlDatasetWriter
+
+ return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
+
+ def _estimate_nbytes(self) -> int:
+ dataset_nbytes = self.data.nbytes
+
+ # Find decodable columns, because if there are any, we need to
+ # adjust the dataset size computation (needed for sharding) to account for possible external files
+ decodable_columns = [
+ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
+ ]
+
+ if decodable_columns:
+ # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
+ extra_nbytes = 0
+
+ def extra_nbytes_visitor(array, feature):
+ nonlocal extra_nbytes
+ if isinstance(feature, (Audio, Image)):
+ for x in array.to_pylist():
+ if x is not None and x["bytes"] is None and x["path"] is not None:
+ size = xgetsize(x["path"])
+ extra_nbytes += size
+ extra_nbytes -= array.field("path").nbytes
+
+ table = self.with_format("arrow")[:1000]
+ table_visitor(table, extra_nbytes_visitor)
+
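+ # Extrapolate the external-file bytes measured on the sampled rows to the full table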
+ extra_nbytes = extra_nbytes * len(self.data) / len(table)
+ dataset_nbytes = dataset_nbytes + extra_nbytes
+
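+ # An indices mapping selects only a subset of the underlying table, so scale the estimate accordingly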
+ if self._indices is not None:
+ dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
+ return dataset_nbytes
+
+ @staticmethod
+ def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
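+ # Yield (shard_idx, Arrow table) pairs by iterating over each shard in Arrow format; this feeds the IterableDataset built in `to_iterable_dataset`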
+ for shard_idx, shard in enumerate(shards):
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ yield shard_idx, pa_table
+
+ @staticmethod
+ def _generate_tables_from_cache_file(filename: str):
+ for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
+ yield batch_idx, pa.Table.from_batches([batch])
+
+ def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
+ """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
+ This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
+
+ Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
+ Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
+ All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
+
+ Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
+ This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
+
+ To get the best speed performance, make sure your dataset doesn't have an indices mapping.
+ If this is the case, the data are not read contiguously, which can be slow sometimes.
+ You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
+
+ Args:
+ num_shards (`int`, defaults to `1`):
+ Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
+ and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
+ Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
+
+ Returns:
+ [`datasets.IterableDataset`]
+
+ Example:
+
+ Basic usage:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With lazy filtering and processing:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With sharding to enable efficient shuffling:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.filter(filter_fn).map(process_fn)
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader and shuffling:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling:
+ ```python
+ >>> import torch
+ >>> from datasets.distributed import split_dataset_by_node
+ >>> ids = ds.to_iterable_dataset(num_shards=512)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled list of shards when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With shuffling and multiple epochs:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> for epoch in range(n_epochs):
+ ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
+ ... for example in ids:
+ ... pass
+ ```
+ Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
+ """
+ from .iterable_dataset import ArrowExamplesIterable, IterableDataset
+
+ if self._format_type is not None:
+ raise NotImplementedError(
+ "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
+ )
+ if num_shards > len(self):
+ raise ValueError(
+ f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
+ )
+ if self._indices is not None:
+ logger.info(
+ "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
+ "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
+ )
+ shards = (
+ [copy.deepcopy(self)]
+ if num_shards == 1
+ else [
+ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
+ ]
+ )
+ ex_iterable = ArrowExamplesIterable(
+ Dataset._generate_tables_from_shards,
+ kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
+ )
+ return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
+
+ def _push_parquet_shards_to_hub(
+ self,
+ repo_id: str,
+ data_dir: str = "data",
+ split: Optional[str] = None,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> Tuple[List[CommitOperationAdd], int, int]:
+ """Pushes the dataset shards as Parquet files to the hub.
+
+ Returns:
+ additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards
+ uploaded_size (`int`): number of uploaded bytes to the repository
+ dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
+ """
+ # Find decodable columns, because if there are any, we need to:
+ # embed the bytes from the files in the shards
+ decodable_columns = (
+ [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
+ if embed_external_files
+ else []
+ )
+
+ dataset_nbytes = self._estimate_nbytes()
+
+ if num_shards is None:
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
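+ # Aim for shards of at most ~max_shard_size bytes, with at least one shard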
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, 1)
+
+ shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
+
+ if decodable_columns:
+
+ def shards_with_embedded_external_files(shards):
+ for shard in shards:
+ format = shard.format
+ shard = shard.with_format("arrow")
+ shard = shard.map(
+ embed_table_storage,
+ batched=True,
+ batch_size=1000,
+ keep_in_memory=True,
+ )
+ shard = shard.with_format(**format)
+ yield shard
+
+ shards = shards_with_embedded_external_files(shards)
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ uploaded_size = 0
+ additions = []
+ for index, shard in hf_tqdm(
+ enumerate(shards),
+ desc="Uploading the dataset shards",
+ total=num_shards,
+ ):
+ shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
+ buffer = BytesIO()
+ shard.to_parquet(buffer)
+ uploaded_size += buffer.tell()
+ shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
+ api.preupload_lfs_files(
+ repo_id=repo_id,
+ additions=[shard_addition],
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ additions.append(shard_addition)
+
+ return additions, uploaded_size, dataset_nbytes
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ split: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the dataset to the hub as a Parquet dataset.
+ The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to `False`.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`, defaults to "default"):
+ The configuration name (or subset) of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ split (`str`, *optional*):
+ The name of the split that will be given to that dataset. Defaults to `self.split`.
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*, defaults to `False`):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
+ a unit (like `"5MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset.push_to_hub("/")
+ >>> dataset_dict.push_to_hub("/", private=True)
+ >>> dataset.push_to_hub("/", max_shard_size="1GB")
+ >>> dataset.push_to_hub("/", num_shards=1024)
+ ```
+
+ If your dataset has multiple splits (e.g. train/validation/test):
+
+ ```python
+ >>> train_dataset.push_to_hub("/", split="train")
+ >>> val_dataset.push_to_hub("/", split="validation")
+ >>> # later
+ >>> dataset = load_dataset("/")
+ >>> train_dataset = dataset["train"]
+ >>> val_dataset = dataset["validation"]
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("/", "en")
+ >>> french_dataset.push_to_hub("/", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("/", "en")
+ >>> french_dataset = load_dataset("/", "fr")
+ ```
+ """
+ if config_name == "data":
+ raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
+
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+
+ if split is None:
+ split = str(self.split) if self.split is not None else "train"
+
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
+ repo_id=repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards,
+ create_pr=create_pr,
+ embed_external_files=embed_external_files,
+ )
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ deletions, deleted_size = [], 0
+ repo_splits = [] # use a list to keep the order of the splits
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in api.list_repo_tree(
+ repo_id=repo_id, revision=revision, repo_type="dataset", token=token, recursive=True
+ ):
+ if not isinstance(repo_file, RepoFile):
+ continue
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ deleted_size += repo_file.size
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
+ info_to_dump = self.info.copy()
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = uploaded_size
+ info_to_dump.dataset_size = dataset_nbytes
+ info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict(
+ {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
+ )
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if dataset_infos and config_name in dataset_infos:
+ repo_info = dataset_infos[config_name]
+ else:
+ repo_info = None
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
+ repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ repo_info = None
+ # update the total info to dump from existing info
+ if repo_info is not None:
+ logger.info("Updating downloaded metadata with the new split.")
+ if repo_info.splits and list(repo_info.splits) != [split]:
+ if self._info.features != repo_info.features:
+ raise ValueError(
+ f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
+ )
+
+ if split in repo_info.splits:
+ repo_info.download_size -= deleted_size
+ repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
+
+ repo_info.download_checksums = None
+ repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
+ repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
+ repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
+ repo_info.splits.pop(split, None)
+ repo_info.splits[split] = SplitInfo(
+ split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
+ )
+ info_to_dump = repo_info
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ # update the metadata configs
+ if config_name in metadata_configs:
+ metadata_config = metadata_configs[config_name]
+ if "data_files" in metadata_config:
+ data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
+ else:
+ data_files_to_dump = {}
+ # add the new split
+ data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
+ metadata_config_to_dump = {
+ "data_files": [
+ {
+ "split": _split,
+ "path": _pattern[0] if len(_pattern) == 1 else _pattern,
+ }
+ for _split, _pattern in data_files_to_dump.items()
+ ]
+ }
+ else:
+ metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
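+ # Attach the deletions of the outdated split shards to the first commit only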
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):
+ """Add column to Dataset.
+
+
+
+ Args:
+ name (`str`):
+ Column name.
+ column (`list` or `np.array`):
+ Column data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> more_text = ds["text"]
+ >>> ds.add_column(name="text_2", column=more_text)
+ Dataset({
+ features: ['text', 'label', 'text_2'],
+ num_rows: 1066
+ })
+ ```
+ """
+ column_table = InMemoryTable.from_pydict({name: column})
+ _check_column_names(self._data.column_names + column_table.column_names)
+ dataset = self.flatten_indices() if self._indices is not None else self
+ # Concatenate tables horizontally
+ table = concat_tables([dataset._data, column_table], axis=1)
+ # Update features
+ info = dataset.info.copy()
+ info.features.update(Features.from_arrow_schema(column_table.schema))
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ By default the index is done over the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ column (`str`):
+ The column of the vectors to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ By default it corresponds to `column`.
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`data-type`):
+ The dtype of the numpy arrays that are indexed.
+ Default is `np.float32`.
+
+ Example:
+
+ ```python
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
+ >>> ds_with_embeddings.add_faiss_index(column='embeddings')
+ >>> # query
+ >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ >>> # save index
+ >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> # load index
+ >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+ >>> # query
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ ```
+ """
+ with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
+ super().add_faiss_index(
+ column=column,
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+ return self
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ external_arrays (`np.array`):
+ If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`, *optional*):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`numpy.dtype`):
+ The dtype of the numpy arrays that are indexed. Default is `np.float32`.
+ """
+ super().add_faiss_index_from_external_arrays(
+ external_arrays=external_arrays.astype(dtype),
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
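+
+ # Illustrative sketch (not part of the original file): building an index from
+ # precomputed vectors. Here `embeddings` is assumed to be a 2D `np.float32` array
+ # whose rows are aligned with the dataset rows, and `query` a 1D query vector.
+ # >>> ds.add_faiss_index_from_external_arrays(external_arrays=embeddings, index_name="embeddings")
+ # >>> scores, retrieved_examples = ds.get_nearest_examples("embeddings", query, k=10)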
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval. This is done in-place.
+
+ Args:
+ column (`str`):
+ The column of the documents to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`].
+ By default it corresponds to `column`.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host of where ElasticSearch is running.
+ port (`str`, *optional*, defaults to `9200`):
+ Port of where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_name (`str`, *optional*):
+ The elasticsearch index name used to create the index.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ Example:
+
+ ```python
+ >>> es_client = elasticsearch.Elasticsearch()
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
+ >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
+ ```
+ """
+ with self.formatted_as(type=None, columns=[column]):
+ super().add_elasticsearch_index(
+ column=column,
+ index_name=index_name,
+ host=host,
+ port=port,
+ es_client=es_client,
+ es_index_name=es_index_name,
+ es_index_config=es_index_config,
+ )
+ return self
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_item(self, item: dict, new_fingerprint: str):
+ """Add item to Dataset.
+
+ Args:
+ item (`dict`):
+ Item data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ >>> ds = ds.add_item(new_review)
+ >>> ds[-1]
+ {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ ```
+ """
+ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
+ # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
+ dset_features, item_features = _align_features(
+ [self._info.features, Features.from_arrow_schema(item_table.schema)]
+ )
+ # Cast to align the schemas of the tables and concatenate the tables
+ table = concat_tables(
+ [
+ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
+ item_table.cast(item_features.arrow_schema),
+ ]
+ )
+ if self._indices is None:
+ indices_table = None
+ else:
+ item_indices_array = pa.array([len(self._data)], type=pa.uint64())
+ item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
+ indices_table = concat_tables([self._indices, item_indices_table])
+ info = self.info.copy()
+ info.features.update(item_features)
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(
+ table,
+ info=info,
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=new_fingerprint,
+ )
+
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
+ """Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
+ This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
+ The alignment is done using the lowercase label names.
+
+ Args:
+ label2id (`dict`):
+ The label name to ID mapping to align the dataset with.
+ label_column (`str`):
+ The column name of labels to align on.
+
+ Example:
+
+ ```python
+ >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
+ >>> ds = load_dataset("glue", "mnli", split="train")
+ >>> # mapping to align with
+ >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
+ >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
+ ```
+
+ """
+ # Sanity checks
+ if label_column not in self._data.column_names:
+ raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
+
+ label_feature = self._info.features[label_column]
+ if not (
+ isinstance(label_feature, ClassLabel)
+ or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
+ ):
+ raise ValueError(
+ f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
+ )
+
+ # Sort input mapping by ID value to ensure the label names are aligned
+ label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
+ label_names = list(label2id.keys())
+ # Some label mappings use uppercase label names so we lowercase them during alignment
+ label2id = {k.lower(): v for k, v in label2id.items()}
+ int2str_function = (
+ label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
+ )
+
+ if isinstance(label_feature, ClassLabel):
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ int2str_function(label_id).lower() if label_id is not None else None
+ for label_id in batch[label_column]
+ ]
+ batch[label_column] = [
+ label2id[label_name] if label_name is not None else None for label_name in dset_label_names
+ ]
+ return batch
+
+ else:
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
+ for seq in batch[label_column]
+ ]
+ batch[label_column] = [
+ [label2id[label_name] if label_name is not None else None for label_name in seq]
+ for seq in dset_label_names
+ ]
+ return batch
+
+ features = self.features
+ features[label_column] = (
+ ClassLabel(num_classes=len(label_names), names=label_names)
+ if isinstance(label_feature, ClassLabel)
+ else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
+ )
+ return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
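+
+ # Illustrative sketch (not part of the original file): label names are matched
+ # case-insensitively, so an upper-case mapping still remaps the IDs.
+ # >>> from datasets import Dataset, Features, ClassLabel
+ # >>> ds = Dataset.from_dict({"label": [0, 1]}, features=Features({"label": ClassLabel(names=["neg", "pos"])}))
+ # >>> ds = ds.align_labels_with_mapping({"POS": 0, "NEG": 1}, "label")
+ # >>> ds["label"]  # "neg" -> 1 and "pos" -> 0 under the new mapping
+ # [1, 0]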
+
+
+def _concatenate_map_style_datasets(
+ dsets: List[Dataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+):
+ """
+ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`.
+ When you concatenate on axis 0, missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.Dataset]`): List of Datasets to concatenate.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_map_style_datasets([ds1, ds2])
+ ```
+ """
+ # Ignore datasets with no rows
+ if any(dset.num_rows > 0 for dset in dsets):
+ dsets = [dset for dset in dsets if dset.num_rows > 0]
+ else:
+ # Return first dataset if all datasets are empty
+ return dsets[0]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ if not all(dset.num_rows == dsets[0].num_rows for dset in dsets):
+ raise ValueError("Number of rows must match for all datasets")
+ _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])
+
+ # Find common format or reset format
+ format = dsets[0].format
+ if any(dset.format != format for dset in dsets):
+ format = {}
+ logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.")
+
+ def apply_offset_to_indices_table(table, offset):
+ if offset == 0:
+ return table
+ else:
+ array = table["indices"]
+ new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))
+ return InMemoryTable.from_arrays([new_array], names=["indices"])
+
+ # Concatenate indices if they exist
+ if any(dset._indices is not None for dset in dsets):
+ if axis == 0:
+ # Datasets with no indices tables are replaced with a dataset with an indices table in memory.
+ # Applying an offset to an indices table also brings the table in memory.
+ indices_tables = []
+ for i in range(len(dsets)):
+ if dsets[i]._indices is None:
+ dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i])))
+ indices_tables.append(dsets[i]._indices)
+
+ # An offset needs to be applied to the indices before concatenating
+ offset = 0
+ for i in range(len(dsets)):
+ indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)
+ offset += len(dsets[i]._data)
+
+ # Concatenate indices
+ indices_tables = [t for t in indices_tables if len(t) > 0]
+ if indices_tables:
+ indices_table = concat_tables(indices_tables)
+ else:
+ indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()}))
+ else:
+ if len(dsets) == 1:
+ indices_table = dsets[0]._indices
+ else:
+ for i in range(len(dsets)):
+ dsets[i] = dsets[i].flatten_indices()
+ indices_table = None
+ else:
+ indices_table = None
+
+ table = concat_tables([dset._data for dset in dsets], axis=axis)
+ if axis == 0:
+ features_list = _align_features([dset.features for dset in dsets])
+ else:
+ features_list = [dset.features for dset in dsets]
+ table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})
+
+ # Concatenate infos
+ if info is None:
+ info = DatasetInfo.from_merge([dset.info for dset in dsets])
+ fingerprint = update_fingerprint(
+ "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split}
+ )
+
+ # Make final concatenated dataset
+ concatenated_dataset = Dataset(
+ table,
+ info=info,
+ split=split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+ concatenated_dataset.set_format(**format)
+ return concatenated_dataset
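+
+ # Illustrative sketch (not part of the original file): this is the map-style path behind
+ # the public `concatenate_datasets` helper; axis=0 stacks rows while axis=1 adds columns.
+ # >>> from datasets import Dataset, concatenate_datasets
+ # >>> ds1, ds2 = Dataset.from_dict({"a": [1, 2]}), Dataset.from_dict({"a": [3, 4]})
+ # >>> concatenate_datasets([ds1, ds2])["a"]
+ # [1, 2, 3, 4]
+ # >>> concatenate_datasets([ds1, Dataset.from_dict({"b": [5, 6]})], axis=1).column_names
+ # ['a', 'b']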
+
+
+def _interleave_map_style_datasets(
+ datasets: List["Dataset"],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ **kwargs,
+) -> "Dataset":
+ """
+ Interleave several map-style datasets (sources) into a single map-style dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+ If `probabilities = None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+ Args:
+ datasets (`List[Dataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+ **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
+
+ Output:
+ :class:`datasets.Dataset`
+ """
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(
+ f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
+ )
+
+ # To interleave the datasets, we concatenate them and then we re-order the indices
+ concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
+
+ # Let's now build the indices to pass to .select()
+ lengths = [len(dset) for dset in datasets]
+ offsets = np.cumsum([0] + lengths[:-1])
+
+ # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
+ oversampling = stopping_strategy == "all_exhausted"
+
+ if probabilities is None and not oversampling:
+ # Undersampling situation with cycling between each source
+ # Example:: If lengths of the datasets are [3, 4, 5]
+ # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
+ # Note that we only have 3 examples per dataset since the first dataset ran out of examples
+
+ # Reasoning behind the following operation: keeping the min_length first indices of each dataset
+ # while offsetting in order to correspond to the right indices of the concatenated dataset
+ # and flattening to effectively interleave the datasets
+ indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
+ elif probabilities is None:
+ # Oversampling situation with cycling between each source
+ # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
+ # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
+
+ # Reasoning behind the following operation: for each dataset's indices (i.e. each column), repeat the indices to have max_length indices per dataset
+ # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
+ indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
+
+ # We then add the offsets so that each index points to its dataset's rows in the concatenation, and flatten to effectively interleave the datasets
+ indices = (indices + offsets).flatten().tolist()
+
+ else:
+ # boolean array indicating, at index i, whether dataset_i has been fully exhausted
+ is_exhausted = np.full(len(lengths), False)
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soons as every dataset is exhausted, i.e as soon as every samples of every dataset has been visited at least once
+ bool_strategy_func = np.all if oversampling else np.any
+
+ def iter_random_indices():
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ rng = np.random.default_rng(seed)
+ while True:
+ yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
+
+ current_index = [0] * len(datasets)
+ indices = []
+ for source_idx in iter_random_indices():
+ # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
+ # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
+ if bool_strategy_func(is_exhausted):
+ # the stopping condition was reached, let's stop
+ break
+
+ # let's add the example at the current index of the `source_idx`-th dataset
+ indices.append(current_index[source_idx] + offsets[source_idx])
+ current_index[source_idx] += 1
+
+ # we've run out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0
+ if current_index[source_idx] >= lengths[source_idx]:
+ is_exhausted[source_idx] = True
+ current_index[source_idx] = 0
+
+ return concatenated_datasets.select(indices, **kwargs)
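+
+# Illustrative sketch (not part of the original file): for sources of lengths [3, 4, 5]
+# the selected indices are [0, 3, 7, 1, 4, 8, 2, 5, 9] with "first_exhausted"
+# (3 rounds, stop once the shortest source is exhausted) and
+# [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11] with "all_exhausted" (5 rounds, shorter sources wrap around).
+# >>> from datasets import Dataset, interleave_datasets
+# >>> sources = [Dataset.from_dict({"x": list(range(n))}) for n in (3, 4, 5)]
+# >>> len(interleave_datasets(sources)), len(interleave_datasets(sources, stopping_strategy="all_exhausted"))
+# (9, 15)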
+
+
+def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ Args:
+ dataset ([`Dataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`]: The dataset to be used on the node at rank `rank`.
+ """
+ return dataset.shard(num_shards=world_size, index=rank, contiguous=True)
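+
+# Illustrative sketch (not part of the original file): the public entry point for this
+# helper is `datasets.distributed.split_dataset_by_node`; with contiguous sharding each of
+# the `world_size` ranks receives one contiguous chunk of roughly len(dataset) / world_size rows.
+# `my_dataset` below is assumed to be any map-style `Dataset`.
+# >>> from datasets.distributed import split_dataset_by_node
+# >>> part = split_dataset_by_node(my_dataset, rank=0, world_size=4)  # first quarter of the rows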
+
+
+# This is outside Dataset.filter as it needs to be picklable for multiprocessing
+
+
+def get_indices_from_mask_function(
+ function: Callable,
+ batched: bool,
+ with_indices: bool,
+ with_rank: bool,
+ input_columns: Optional[Union[str, List[str]]],
+ indices_mapping: Optional[Table] = None,
+ *args,
+ **fn_kwargs,
+):
+ if batched:
+ # we extract indices and rank from args
+ *inputs, indices, rank = args
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices,)
+ if with_rank:
+ additional_args += (rank,)
+ mask = function(*inputs, *additional_args, **fn_kwargs)
+ else:
+ # we get batched data (to do less look-ups) but `function` only accepts one example
+ # therefore we need to call `function` on each example of the batch to get the mask
+ *inputs, indices, rank = args
+ mask = []
+ if input_columns is None:
+ # inputs only contains a batch of examples
+ batch: dict = inputs[0]
+ num_examples = len(batch[next(iter(batch.keys()))])
+ for i in range(num_examples):
+ example = {key: batch[key][i] for key in batch}
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(example, *additional_args, **fn_kwargs))
+ else:
+ # inputs is a list of columns
+ columns: List[List] = inputs
+ num_examples = len(columns[0])
+ for i in range(num_examples):
+ input = [column[i] for column in columns]
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(*input, *additional_args, **fn_kwargs))
+ indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
+ if indices_mapping is not None:
+ indices_array = pa.array(indices_array, type=pa.uint64())
+ indices_array = indices_mapping.column(0).take(indices_array)
+ indices_array = indices_array.to_pylist()
+ return {"indices": indices_array}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/builder.py b/llmeval-env/lib/python3.10/site-packages/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..85f7d88b7dbf7bc01589b70fe8c539224e6700cf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/builder.py
@@ -0,0 +1,2293 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, Union
+from unittest.mock import patch
+
+import fsspec
+import pyarrow as pa
+from fsspec.core import url_to_fs
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config, utils
+from .arrow_dataset import Dataset
+from .arrow_reader import (
+ HF_GCP_BASE_URL,
+ ArrowReader,
+ DatasetNotOnHfGcsError,
+ MissingFilesOnHfGcsError,
+ ReadInstruction,
+)
+from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
+from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager, DownloadMode
+from .download.mock_download_manager import MockDownloadManager
+from .download.streaming_download_manager import StreamingDownloadManager, xjoin, xopen
+from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError
+from .features import Features
+from .filesystems import (
+ is_remote_filesystem,
+ rename,
+)
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
+from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
+from .keyhash import DuplicatedKeysError
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
+from .splits import Split, SplitDict, SplitGenerator, SplitInfo
+from .streaming import extend_dataset_builder_for_streaming
+from .table import CastError
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import cached_path, is_remote_url
+from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
+from .utils.py_utils import (
+ classproperty,
+ convert_file_size_to_int,
+ has_sufficient_disk_space,
+ iflatmap_unordered,
+ map_nested,
+ memoize,
+ size_str,
+ temporary_assignment,
+)
+from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
+from .utils.track import tracked_list
+
+
+if TYPE_CHECKING:
+ from .load import DatasetModule
+
+
+logger = logging.get_logger(__name__)
+
+
+class InvalidConfigName(ValueError):
+ pass
+
+
+@dataclass
+class BuilderConfig:
+ """Base class for `DatasetBuilder` data configuration.
+
+ `DatasetBuilder` subclasses with data configuration options should subclass
+ `BuilderConfig` and add their own properties.
+
+ Attributes:
+ name (`str`, defaults to `default`):
+ The name of the configuration.
+ version (`Version` or `str`, defaults to `0.0.0`):
+ The version of the configuration.
+ data_dir (`str`, *optional*):
+ Path to the directory containing the source data.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ description (`str`, *optional*):
+ A human description of the configuration.
+ """
+
+ name: str = "default"
+ version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
+ data_dir: Optional[str] = None
+ data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
+ description: Optional[str] = None
+
+ def __post_init__(self):
+ # The config name is used to name the cache directory.
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in self.name:
+ raise InvalidConfigName(
+ f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
+ f"They could create issues when creating a directory for this config on Windows filesystem."
+ )
+ if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
+ raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+ def __eq__(self, o):
+ # we need to override the default dataclass __eq__ since it doesn't check for
+ # other attributes than the ones of the signature.
+ if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+ return False
+ return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[Features] = None,
+ ) -> str:
+ """
+ The config id is used to build the cache directory.
+ By default it is equal to the config name.
+ However the name of a config is not sufficient to have a unique identifier for the dataset being generated
+ since it doesn't take into account:
+ - the config kwargs that can be used to overwrite attributes
+ - the custom features used to write the dataset
+ - the data_files for json/text/csv/pandas datasets
+
+ Therefore the config id is just the config name with an optional suffix based on these.
+ """
+ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+ suffix: Optional[str] = None
+ config_kwargs_to_add_to_suffix = config_kwargs.copy()
+ # name and version are already used to build the cache directory
+ config_kwargs_to_add_to_suffix.pop("name", None)
+ config_kwargs_to_add_to_suffix.pop("version", None)
+ # data dir handling (when specified it points to the manually downloaded data):
+ # it was previously ignored before the introduction of config id because we didn't want
+ # to change the config name. Now it's fine to take it into account for the config id.
+ # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if "data_dir" in config_kwargs_to_add_to_suffix:
+ if config_kwargs_to_add_to_suffix["data_dir"] is None:
+ config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ else:
+ # canonicalize the data dir to avoid two paths to the same location having different
+ # hashes
+ data_dir = config_kwargs_to_add_to_suffix["data_dir"]
+ data_dir = os.path.normpath(data_dir)
+ config_kwargs_to_add_to_suffix["data_dir"] = data_dir
+ if config_kwargs_to_add_to_suffix:
+ # we don't care about the order of the kwargs
+ config_kwargs_to_add_to_suffix = {
+ k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+ }
+ if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+ suffix = ",".join(
+ str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+ )
+ if len(suffix) > 32: # hash if too long
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+ else:
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+ if custom_features is not None:
+ m = Hasher()
+ if suffix:
+ m.update(suffix)
+ m.update(custom_features)
+ suffix = m.hexdigest()
+
+ if suffix:
+ config_id = self.name + "-" + suffix
+ if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+ config_id = self.name + "-" + Hasher.hash(suffix)
+ return config_id
+ else:
+ return self.name
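+
+ # Illustrative sketch (not part of the original file): simple scalar kwargs become a
+ # readable "key=value" suffix appended to the config name, while non-scalar values or
+ # overly long suffixes fall back to a hash.
+ # >>> BuilderConfig(name="default").create_config_id({"sep": ";"})
+ # 'default-sep=%3B'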
+
+ def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
+ if isinstance(self.data_files, DataFilesPatternsDict):
+ base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
+ self.data_files = self.data_files.resolve(base_path, download_config)
+
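+# Illustrative sketch (not part of the original file): a builder that exposes options
+# would typically subclass `BuilderConfig` as a dataclass and add its own fields, e.g.
+#
+#     @dataclass
+#     class DemoConfig(BuilderConfig):
+#         sep: str = ","
+#
+# and then list instances of it in its `BUILDER_CONFIGS` class attribute.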
+
+class DatasetBuilder:
+ """Abstract base class for all datasets.
+
+ `DatasetBuilder` has 3 key methods:
+
+ - [`DatasetBuilder.info`]: Documents the dataset, including feature
+ names, types, shapes, version, splits, citation, etc.
+ - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+ and writes it to disk.
+ - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+ Some `DatasetBuilder`s expose multiple variants of the
+ dataset by defining a [`BuilderConfig`] subclass and accepting a
+ config object (or name) on construction. Configurable datasets expose a
+ pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+ Args:
+ cache_dir (`str`, *optional*):
+ Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+ dataset_name (`str`, *optional*):
+ Name of the dataset, if different from the builder name. Useful for packaged builders
+ like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
+ that use the same packaged builder.
+ config_name (`str`, *optional*):
+ Name of the dataset configuration.
+ It affects the data generated on disk. Different configurations will have their own subdirectories and
+ versions.
+ If not provided, the default configuration is used (if it exists).
+
+ *Changed in version 2.3.0:* Parameter `name` was renamed to `config_name`.
+
+ hash (`str`, *optional*):
+ Hash specific to the dataset code. Used to update the caching directory when the
+ dataset loading script code is updated (to avoid reusing old data).
+ The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files.
+ This can be a remote URL.
+ features ([`Features`], *optional*):
+ Features types to use with this dataset.
+ It can be used to change the [`Features`] types of a dataset, for example.
+ token (`str` or `bool`, *optional*):
+ String or boolean to use as Bearer token for remote files on the
+ Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+ repo_id (`str`, *optional*):
+ ID of the dataset repository.
+ Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+ and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ For builders like "csv" or "json" that need the user to specify data files. They can be either
+ local or remote files. For convenience, you can use a `DataFilesDict`.
+ data_dir (`str`, *optional*):
+ Path to directory containing source data file(s).
+ Use only if `data_files` is not passed, in which case it is equivalent to passing
+ `os.path.join(data_dir, "**")` as `data_files`.
+ For builders that require manual download, it must be the path to the local directory containing the
+ manually downloaded data.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ writer_batch_size (`int`, *optional*):
+ Batch size used by the ArrowWriter.
+ It defines the number of samples that are kept in memory before writing them
+ and also the length of the arrow chunks.
+ None means that the ArrowWriter will use its default value.
+ name (`str`): Configuration name for the dataset.
+
+ *Deprecated in version 2.3.0:* Use `config_name` instead.
+
+ **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+ configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+ configuration class is [`BuilderConfig`] or a subclass of it.
+ """
+
+ # Default version
+ VERSION = None # Default version set in BuilderConfig
+
+ # Class for the builder config.
+ BUILDER_CONFIG_CLASS = BuilderConfig
+
+ # Named configurations that modify the data generated by download_and_prepare.
+ BUILDER_CONFIGS = []
+
+ # Optional default config name to be used when name is None
+ DEFAULT_CONFIG_NAME = None
+
+ # Default batch size used by the ArrowWriter
+ # It defines the number of samples that are kept in memory before writing them
+ # and also the length of the arrow chunks
+ # None means that the ArrowWriter will use its default value
+ DEFAULT_WRITER_BATCH_SIZE = None
+
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[DatasetInfo] = None,
+ features: Optional[Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ # DatasetBuilder name
+ self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
+ self.hash: Optional[str] = hash
+ self.base_path = base_path
+ self.token = token
+ # For backwards compatibility (e.g. if accessed in a dataset script)
+ self.use_auth_token = token
+ self.repo_id = repo_id
+ self.storage_options = storage_options or {}
+ self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
+ self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+ if data_files is not None and not isinstance(data_files, DataFilesDict):
+ data_files = DataFilesDict.from_patterns(
+ sanitize_patterns(data_files),
+ base_path=base_path,
+ download_config=DownloadConfig(token=token, storage_options=self.storage_options),
+ )
+
+ # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
+ if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+ config_kwargs["features"] = features
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ self.config_kwargs = config_kwargs
+ self.config, self.config_id = self._create_builder_config(
+ config_name=config_name,
+ custom_features=features,
+ **config_kwargs,
+ )
+
+ # prepare info: DatasetInfo are a standardized dataclass across all datasets
+ # Prefill datasetinfo
+ if info is None:
+ # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
+ info = self.get_exported_dataset_info()
+ info.update(self._info())
+ info.builder_name = self.name
+ info.dataset_name = self.dataset_name
+ info.config_name = self.config.name
+ info.version = self.config.version
+ self.info = info
+ # update info with user specified infos
+ if features is not None:
+ self.info.features = features
+
+ # Prepare data dirs:
+ # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+ self._cache_dir_root = (
+ self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+ )
+ self._cache_downloaded_dir = (
+ posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+ if cache_dir
+ else str(config.DOWNLOADED_DATASETS_PATH)
+ )
+ self._cache_downloaded_dir = (
+ self._cache_downloaded_dir
+ if is_remote_url(self._cache_downloaded_dir)
+ else os.path.expanduser(self._cache_downloaded_dir)
+ )
+
+ # In case there exists a legacy cache directory
+ self._legacy_relative_data_dir = None
+
+ self._cache_dir = self._build_cache_dir()
+ if not is_remote_url(self._cache_dir_root):
+ os.makedirs(self._cache_dir_root, exist_ok=True)
+ lock_path = os.path.join(
+ self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
+ )
+ with FileLock(lock_path):
+ if os.path.exists(self._cache_dir): # check if data exist
+ if len(os.listdir(self._cache_dir)) > 0:
+ if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+ logger.info("Overwrite dataset info from restored data version if exists.")
+ self.info = DatasetInfo.from_directory(self._cache_dir)
+ else: # dir exists but no data, remove the empty dir as data aren't available anymore
+ logger.warning(
+ f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
+ )
+ os.rmdir(self._cache_dir)
+
+ # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
+ self._output_dir = self._cache_dir
+ self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
+
+ # Set download manager
+ self.dl_manager = None
+
+ # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
+ self._record_infos = False
+
+ # Set in `.download_and_prepare` once the format of the generated dataset is known
+ self._file_format = None
+
+ # Enable streaming (e.g. it patches "open" to work with remote files)
+ extend_dataset_builder_for_streaming(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-enable streaming, since patched functions are not kept when pickling
+ extend_dataset_builder_for_streaming(self)
+
+ # Must be set for datasets that use 'data_dir' functionality - the ones
+ # that require users to do additional steps to download the data
+ # (this is usually due to some external regulations / rules).
+ # This field should contain a string with user instructions, including
+ # the list of files that should be present. It will be
+ # displayed in the dataset documentation.
+ @property
+ def manual_download_instructions(self) -> Optional[str]:
+ return None
+
+ def _check_legacy_cache(self) -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and self.config.name == "default"
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
+ config_id = config_name + self.config_id[len(self.config.name) :]
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and not (set(self.config_kwargs) - {"data_files", "data_dir"})
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+ from .utils._dill import Pickler
+
+ def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
+ """
+ Used to update the hash of packaged modules, which is used to create unique cache directories
+ reflecting the config parameters passed in the metadata from the README.
+ """
+ params_to_exclude = {"config_name", "version", "description"}
+ params_to_add_to_hash = {
+ param: value
+ for param, value in sorted(config_parameters.items())
+ if param not in params_to_exclude
+ }
+ m = Hasher()
+ m.update(hash)
+ m.update(params_to_add_to_hash)
+ return m.hexdigest()
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
+ config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ if (
+ dataset_module.builder_configs_parameters.metadata_configs
+ and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
+ ):
+ hash = update_hash_with_config_parameters(
+ hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
+ )
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ @classmethod
+ def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
+ """Empty dict if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_all_exported_dataset_infos()
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
+ ```
+ """
+ return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
+
+ def get_exported_dataset_info(self) -> DatasetInfo:
+ """Empty `DatasetInfo` if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_exported_dataset_info()
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
+ ```
+ """
+ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
+
+ def _create_builder_config(
+ self, config_name=None, custom_features=None, **config_kwargs
+ ) -> Tuple[BuilderConfig, str]:
+ """Create and validate BuilderConfig object as well as a unique config id for this config.
+ Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
+ config_kwargs override the default kwargs of the config.
+ """
+ builder_config = None
+
+ # try default config
+ if config_name is None and self.BUILDER_CONFIGS:
+ if self.DEFAULT_CONFIG_NAME is not None:
+ builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
+ logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
+ else:
+ if len(self.BUILDER_CONFIGS) > 1:
+ if not config_kwargs:
+ example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
+ raise ValueError(
+ "Config name is missing."
+ f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ + f"\nExample of usage:\n\t`{example_of_usage}`"
+ )
+ else:
+ builder_config = self.BUILDER_CONFIGS[0]
+ logger.info(
+ f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
+ )
+
+ # try to get config by name
+ if isinstance(config_name, str):
+ builder_config = self.builder_configs.get(config_name)
+ if builder_config is None and self.BUILDER_CONFIGS:
+ raise ValueError(
+ f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
+ )
+
+ # if not using an existing config, then create a new config on the fly
+ if not builder_config:
+ if config_name is not None:
+ config_kwargs["name"] = config_name
+ elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+ # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+ config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+ if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+ config_kwargs["version"] = self.VERSION
+ builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+ # otherwise use the config_kwargs to overwrite the attributes
+ else:
+ builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
+ for key, value in config_kwargs.items():
+ if value is not None:
+ if not hasattr(builder_config, key):
+ raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+ setattr(builder_config, key, value)
+
+ if not builder_config.name:
+ raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+ # resolve data files if needed
+ builder_config._resolve_data_files(
+ base_path=self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ )
+
+ # compute the config id that is going to be used for caching
+ config_id = builder_config.create_config_id(
+ config_kwargs,
+ custom_features=custom_features,
+ )
+ is_custom = (config_id not in self.builder_configs) and config_id != "default"
+ if is_custom:
+ logger.info(f"Using custom data configuration {config_id}")
+ else:
+ if (
+ builder_config.name in self.builder_configs
+ and builder_config != self.builder_configs[builder_config.name]
+ ):
+ raise ValueError(
+ "Cannot name a custom BuilderConfig the same as an available "
+ f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+ )
+ if not builder_config.version:
+ raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+
+ return builder_config, config_id
+
+ @classproperty
+ @classmethod
+ @memoize()
+ def builder_configs(cls) -> Dict[str, BuilderConfig]:
+ """Dictionary of pre-defined configurations for this builder class."""
+ configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+ if len(configs) != len(cls.BUILDER_CONFIGS):
+ names = [config.name for config in cls.BUILDER_CONFIGS]
+ raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+ return configs
+
+ @property
+ def cache_dir(self):
+ return self._cache_dir
+
+ def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
+ # Check for the legacy cache directory template (datasets<3.0.0)
+ self._legacy_relative_data_dir = (
+ self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
+ )
+ self._cache_dir = self._build_cache_dir()
+ self._output_dir = self._cache_dir
+
+ def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
+ """Relative path of this dataset in cache_dir:
+ Will be:
+ self.dataset_name/self.config_id/self.config.version/self.hash/
+ or if a repo_id with a namespace has been specified:
+ self.namespace___self.dataset_name/self.config_id/self.config.version/self.hash/
+ If any of these elements is missing, or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ if self._legacy_relative_data_dir is not None and with_version and with_hash:
+ return self._legacy_relative_data_dir
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
+ builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
+ if with_version:
+ builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
+ if with_hash and self.hash and isinstance(self.hash, str):
+ builder_data_dir = posixpath.join(builder_data_dir, self.hash)
+ return builder_data_dir
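+
+ # Illustrative sketch (not part of the original file): for a hypothetical builder with
+ # repo_id "user/demo", dataset_name "demo", config_id "default", version "1.0.0" and
+ # hash "abc123", this resolves to "user___demo/default/1.0.0/abc123".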
+
+ def _build_cache_dir(self):
+ """Return the data directory for the current version."""
+ builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
+ version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
+
+ def _other_versions_on_disk():
+ """Returns previous versions on disk."""
+ if not os.path.exists(builder_data_dir):
+ return []
+
+ version_dirnames = []
+ for dir_name in os.listdir(builder_data_dir):
+ try:
+ version_dirnames.append((utils.Version(dir_name), dir_name))
+ except ValueError: # Invalid version (ex: incomplete data dir)
+ pass
+ version_dirnames.sort(reverse=True)
+ return version_dirnames
+
+ # Check and warn if other versions exist
+ if not is_remote_url(builder_data_dir):
+ version_dirs = _other_versions_on_disk()
+ if version_dirs:
+ other_version = version_dirs[0][0]
+ if other_version != self.config.version:
+ warn_msg = (
+ f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
+ f"cache_dir {self._cache_dir_root}. Using currently defined version "
+ f"{str(self.config.version)}."
+ )
+ logger.warning(warn_msg)
+
+ return version_data_dir
+
+ @abc.abstractmethod
+ def _info(self) -> DatasetInfo:
+ """Construct the DatasetInfo object. See `DatasetInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (DatasetInfo) The dataset information
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_imported_module_dir(cls):
+ """Return the path of the module of this class or subclass."""
+ return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
+
+ def _rename(self, src: str, dst: str):
+ rename(self._fs, src, dst)
+
+ def download_and_prepare(
+ self,
+ output_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ try_from_hf_gcs="deprecated",
+ dl_manager: Optional[DownloadManager] = None,
+ base_path: Optional[str] = None,
+ use_auth_token="deprecated",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **download_and_prepare_kwargs,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ output_dir (`str`, *optional*):
+ Output directory for the dataset.
+ Defaults to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+ 
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+ Select the download/generate mode, defaults to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+ 
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+ 
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+ 
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+ 
+ `try_from_hf_gcs` was deprecated in version 2.16.0 and will be removed in 3.0.0.
+ Host the processed files on the Hugging Face Hub instead.
+ 
+ dl_manager (`DownloadManager`, *optional*):
+ Specific `DownloadManger` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote url.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from ~/.huggingface.
+ 
+ `use_auth_token` was deprecated in version 2.7.1 and will be removed in 3.0.0.
+ Pass `token` to `load_dataset_builder` instead.
+ 
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Defaults to the "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+ 
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, defaults to "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression, for example.
+ 
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+ 
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+ 
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments forwarded to the internal `_download_and_prepare` call.
+
+ Example:
+
+ Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare()
+ ```
+
+ Download and prepare the dataset as sharded Parquet files locally:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("./output_dir", file_format="parquet")
+ ```
+
+ Download and prepare the dataset as sharded Parquet files in a cloud storage:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ else:
+ token = self.token
+
+ if try_from_hf_gcs != "deprecated":
+ warnings.warn(
+ "'try_from_hf_gcs' was deprecated in version 2.16.0 and will be removed in 3.0.0.",
+ FutureWarning,
+ )
+ else:
+ try_from_hf_gcs = False
+
+ output_dir = output_dir if output_dir is not None else self._cache_dir
+ # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ fs, output_dir = url_to_fs(output_dir, **(storage_options or {}))
+ self._fs = fs
+ self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+ base_path = base_path if base_path is not None else self.base_path
+
+ if file_format is not None and file_format not in ["arrow", "parquet"]:
+ raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
+ self._file_format = file_format
+
+ if self._fs._strip_protocol(self._output_dir) == "":
+ # We don't support the root directory, because it has no dirname,
+ # and we need a dirname to use a .incomplete directory
+ # when the dataset is being written
+ raise RuntimeError(
+ f"Unable to download and prepare the dataset at the root {self._output_dir}. "
+ f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
+ )
+
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig(
+ cache_dir=self._cache_downloaded_dir,
+ force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ use_etag=False,
+ num_proc=num_proc,
+ token=token,
+ storage_options=self.storage_options,
+ ) # We don't use etag for data files to speed up the process
+
+ dl_manager = DownloadManager(
+ dataset_name=self.dataset_name,
+ download_config=download_config,
+ data_dir=self.config.data_dir,
+ base_path=base_path,
+ record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
+ )
+
+ is_local = not is_remote_filesystem(self._fs)
+
+ if (
+ isinstance(dl_manager, MockDownloadManager)
+ or not is_local
+ or file_format != "arrow"
+ or max_shard_size is not None
+ ):
+ try_from_hf_gcs = False
+ self.dl_manager = dl_manager
+
+ # Prevent parallel local disk operations
+ if is_local:
+ # Create parent directory of the output_dir to put the lock file in there
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ lock_path = self._output_dir + "_builder.lock"
+
+ # File locking only with local paths; no file locking on GCS or S3
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ # Check if the data already exists
+ data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
+ if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
+ logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
+ # We need to update the info in case some splits were added in the meantime
+ # for example when calling load_dataset from multiple workers.
+ self.info = self._load_info()
+ self.download_post_processing_resources(dl_manager)
+ return
+
+ logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
+ if is_local: # if cache dir is local, check for available space
+ if not has_sufficient_disk_space(
+ self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
+ ):
+ raise OSError(
+ f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
+ )
+
+ @contextlib.contextmanager
+ def incomplete_dir(dirname):
+ """Create temporary dir for dirname and rename on exit."""
+ if not is_local:
+ self._fs.makedirs(dirname, exist_ok=True)
+ yield dirname
+ else:
+ tmp_dir = dirname + ".incomplete"
+ os.makedirs(tmp_dir, exist_ok=True)
+ try:
+ yield tmp_dir
+ if os.path.isdir(dirname):
+ shutil.rmtree(dirname)
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
+ shutil.move(tmp_dir, dirname)
+ finally:
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
+
+ # Log the estimated sizes right before the progress bar so the user has the
+ # information needed to cancel the download/preparation if needed.
+ if self.info.size_in_bytes:
+ logger.info(
+ f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
+ f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
+ f"post-processed: {size_str(self.info.post_processing_size)}, "
+ f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
+ )
+ else:
+ _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
+ logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
+
+ self._check_manual_download(dl_manager)
+
+ # Create a tmp dir and rename to self._output_dir on successful exit.
+ with incomplete_dir(self._output_dir) as tmp_output_dir:
+ # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward
+ # it to every sub function.
+ with temporary_assignment(self, "_output_dir", tmp_output_dir):
+ # Try to download the already prepared dataset files
+ downloaded_from_gcs = False
+ if try_from_hf_gcs:
+ try:
+ self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ downloaded_from_gcs = True
+ except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
+ logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
+ except ConnectionError:
+ logger.warning("HF google storage unreachable. Downloading and preparing it from source")
+ if not downloaded_from_gcs:
+ prepare_split_kwargs = {"file_format": file_format}
+ if max_shard_size is not None:
+ prepare_split_kwargs["max_shard_size"] = max_shard_size
+ if num_proc is not None:
+ prepare_split_kwargs["num_proc"] = num_proc
+ self._download_and_prepare(
+ dl_manager=dl_manager,
+ verification_mode=verification_mode,
+ **prepare_split_kwargs,
+ **download_and_prepare_kwargs,
+ )
+ # Sync info
+ self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
+ self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
+ self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
+ # Save info
+ self._save_info()
+
+ # Download post processing resources
+ self.download_post_processing_resources(dl_manager)
+
+ logger.info(
+ f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
+ f"Subsequent calls will reuse this data."
+ )
+
+ def _check_manual_download(self, dl_manager):
+ if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
+ raise ManualDownloadError(
+ textwrap.dedent(
+ f"""\
+ The dataset {self.dataset_name} with config {self.config.name} requires manual data.
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+ datasets.load_dataset("{self.dataset_name}", data_dir="<path/to/manual/data>")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+ logger.info(f"Couldn't download resource file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+ This is the internal implementation to overwrite called when user calls
+ `download_and_prepare`. It should download all required data and generate
+ the pre-processed datasets files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.dataset_name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+ "`all` is a special split keyword corresponding to the "
+ "union of all splits, so cannot be used as key in "
+ "._split_generator()."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated to the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+ # If check_duplicates is set to True, catch DuplicatedKeysError and re-raise it with a fix message
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ file_lock = (
+ FileLock(self._output_dir + "_info.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ file_lock = (
+ FileLock(self._output_dir + "_infos.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the
+ downloaded/processed dataset information (checksums/size/splits/...).
+ 
+ ignore_verifications (`bool`, defaults to `False`):
+ Whether to ignore the verifications of the
+ downloaded/processed dataset information (checksums/size/splits/...).
+ 
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+ 
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset` or `DatasetDict`: a single `Dataset` for the requested split, or a `DatasetDict` of all splits if `split` is `None`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if self._file_format is not None and self._file_format != "arrow":
+ raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
+ if self.info.post_processed is None:
+ self.info.post_processed = PostProcessedInfo()
+ if self.info.post_processed.resources_checksums is None:
+ self.info.post_processed.resources_checksums = {}
+ self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
+ self.info.post_processing_size = sum(
+ checksums_dict["num_bytes"]
+ for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
+ for checksums_dict in split_checksums_dicts.values()
+ )
+ if self.info.dataset_size is not None and self.info.download_size is not None:
+ self.info.size_in_bytes = (
+ self.info.dataset_size + self.info.download_size + self.info.post_processing_size
+ )
+ self._save_info()
+ ds._info.post_processed = self.info.post_processed
+ ds._info.post_processing_size = self.info.post_processing_size
+ ds._info.size_in_bytes = self.info.size_in_bytes
+ if self.info.post_processed.features is not None:
+ if self.info.post_processed.features.type != ds.features.type:
+ raise ValueError(
+ f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
+ )
+ else:
+ ds.info.features = self.info.post_processed.features
+
+ return ds
+
+ def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
+ """Constructs a `Dataset`.
+
+ This is the internal implementation to overwrite called when user calls
+ `as_dataset`. It should read the pre-processed datasets files and generate
+ the `Dataset` object.
+
+ Args:
+ split (`datasets.Split`):
+ which subset of the data to read.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset`
+ """
+ cache_dir = self._fs._strip_protocol(self._output_dir)
+ dataset_name = self.dataset_name
+ if self._check_legacy_cache():
+ dataset_name = self.name
+ dataset_kwargs = ArrowReader(cache_dir, self.info).read(
+ name=dataset_name,
+ instructions=split,
+ split_infos=self.info.splits.values(),
+ in_memory=in_memory,
+ )
+ fingerprint = self._get_dataset_fingerprint(split)
+ return Dataset(fingerprint=fingerprint, **dataset_kwargs)
+
+ def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
+ """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
+ hasher = Hasher()
+ hasher.update(Path(self._relative_data_dir()).as_posix())
+ hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
+ fingerprint = hasher.hexdigest()
+ return fingerprint
+
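+ # Usage sketch (illustrative; reuses the "rotten_tomatoes" example from the docstrings above):
+ #     from datasets import load_dataset_builder
+ #     builder = load_dataset_builder("rotten_tomatoes")
+ #     streams = builder.as_streaming_dataset()  # IterableDatasetDict: split name -> IterableDataset
+ #     first_example = next(iter(streams["train"]))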
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(
+ f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
+ )
+
+ dl_manager = StreamingDownloadManager(
+ base_path=base_path or self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ dataset_name=self.dataset_name,
+ data_dir=self.config.data_dir,
+ )
+ self._check_manual_download(dl_manager)
+ splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
+ # By default, return all splits
+ if split is None:
+ splits_generator = splits_generators
+ elif split in splits_generators:
+ splits_generator = splits_generators[split]
+ else:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ self._as_streaming_dataset_single,
+ splits_generator,
+ map_tuple=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _as_streaming_dataset_single(
+ self,
+ splits_generator,
+ ) -> IterableDataset:
+ ex_iterable = self._get_examples_iterable_for_split(splits_generator)
+ # add auth to be able to access and decode audio/image files from private repositories.
+ token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
+ return IterableDataset(
+ ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ )
+
+ def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
+ """Run dataset transforms or add indexes"""
+ return None
+
+ def _post_processing_resources(self, split: str) -> Dict[str, str]:
+ """Mapping resource_name -> resource_file_name"""
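+ # Illustrative sketch (hypothetical resource names): a subclass that ships a FAISS index
+ # per split could return {"embeddings_index": f"{split}_index.faiss"} here.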
+ return {}
+
+ def _download_post_processing_resources(
+ self, split: str, resource_name: str, dl_manager: DownloadManager
+ ) -> Optional[str]:
+ """Download the resource using the download manager and return the downloaded path."""
+ return None
+
+ @abc.abstractmethod
+ def _split_generators(self, dl_manager: Union[DownloadManager, StreamingDownloadManager]):
+ """Specify feature dictionary generators and dataset splits.
+
+ This function returns a list of `SplitGenerator`s defining how to generate
+ data and what splits to use.
+
+ Example:
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={'file': 'train_data.zip'},
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={'file': 'test_data.zip'},
+ ),
+ ]
+
+ The above code will first call `_generate_examples(file='train_data.zip')`
+ to write the train data, then `_generate_examples(file='test_data.zip')` to
+ write the test data.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ Note that for datasets without a `VALIDATION` split, you can use a
+ fraction of the `TRAIN` data for evaluation as you iterate on your model
+ so as not to overfit to the `TEST` data.
+
+ For downloads and extractions, use the given `download_manager`.
+ Note that the `DownloadManager` caches downloads, so it is fine to have each
+ generator attempt to download the source data.
+
+ A good practice is to download all data in this function, and then
+ distribute the relevant parts to each split with the `gen_kwargs` argument.
+
+ Args:
+ dl_manager (`Union[DownloadManager, StreamingDownloadManager]`):
+ Download manager to download the data
+
+ Returns:
+ `list`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+ format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Default to "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+ 
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+ This function preprocesses the examples from the raw data into the preprocessed
+ dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+ * Unique: An error will be raised if two examples are yielded with the
+ same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+ The key will be hashed and sorted to shuffle examples deterministically,
+ so that generating the dataset multiple times keeps examples in the
+ same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
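+ 
+ Example (a minimal sketch; `filepath` and the single `"text"` feature are hypothetical):
+ 
+ ```py
+ def _generate_examples(self, filepath):
+     with open(filepath, encoding="utf-8") as f:
+         for line_number, line in enumerate(f):
+             # the line number is a unique, deterministic key
+             yield line_number, {"text": line.strip()}
+ ```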
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ check_duplicate_keys: bool,
+ file_format="arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[int, str]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
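+ # For illustration (hypothetical dataset/split names): with 3 final shards the files are
+ # eventually renamed to e.g. "my_dataset-train-00000-of-00003.arrow" by the logic below,
+ # while a single shard drops the "-JJJJJ-SSSSS-of-NNNNN" suffix entirely.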
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ "split_info": split_info,
+ "check_duplicate_keys": check_duplicate_keys,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+ def _rename_shard(shard_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shards_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self,
+ gen_kwargs: dict,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ split_info: SplitInfo,
+ check_duplicate_keys: bool,
+ job_id: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ generator = self._generate_examples(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for key, record in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ example = self.info.features.encode_example(record) if self.info.features is not None else record
+ writer.write(example, key)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ super()._download_and_prepare(
+ dl_manager,
+ verification_mode,
+ check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
+ or verification_mode == VerificationMode.ALL_CHECKS,
+ **prepare_splits_kwargs,
+ )
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
+
+
+class ArrowBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
+
+ @abc.abstractmethod
+ def _generate_tables(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+ This function preprocesses the examples from the raw data into the preprocessed
+ dataset files.
+ This function is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples yielded here will be written on
+ disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+ * Unique: An error will be raised if two examples are yielded with the
+ same key.
+ * Deterministic: When generating the dataset twice, the same example
+ should have the same key.
+ Good keys can be the image id, or line number if examples are extracted
+ from a text file.
+ The key will be hashed and sorted to shuffle examples deterministically,
+ so that generating the dataset multiple times keeps examples in the
+ same order.
+ example: `pyarrow.Table`, a feature table
+ ready to be encoded and written to disk.
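+ 
+ Example (a minimal sketch with hypothetical CSV inputs):
+ 
+ ```py
+ import pyarrow.csv as pac
+ 
+ def _generate_tables(self, files):
+     for file_idx, file in enumerate(files):
+         # yield one pyarrow Table per input file, keyed by its index
+         yield file_idx, pac.read_csv(file)
+ ```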
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[str, int]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ try:
+ split_info = self.info.splits[split_generator.name]
+ except Exception:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+ def _rename_shard(shard_id_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_id_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shard_ids_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
+ generator = self._generate_tables(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for _, table in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ writer.write_table(table)
+ except CastError as cast_error:
+ raise DatasetGenerationCastError.from_cast_error(
+ cast_error=cast_error,
+ builder_name=self.info.builder_name,
+ gen_kwargs=gen_kwargs,
+ token=self.token,
+ )
+ num_examples_progress_update += len(table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ if isinstance(e, DatasetGenerationError):
+ raise
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
+
+
+class MissingBeamOptions(ValueError):
+ pass
+
+
+@deprecated("Use `GeneratorBasedBuilder` or `ArrowBasedBuilder` instead.")
+class BeamBasedBuilder(DatasetBuilder):
+ """Beam-based Builder."""
+
+ def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
+ self._beam_runner = beam_runner
+ self._beam_options = beam_options
+ self._beam_writers = {} # {split: beam_writer} mapping.
+ super().__init__(*args, **kwargs)
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
+ # it's in the call signature of `_split_generators()`.
+ # This allows for global preprocessing in beam.
+ split_generators_kwargs = {}
+ split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
+ if "pipeline" in split_generators_arg_names:
+ split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
+ return split_generators_kwargs
+
+ @abc.abstractmethod
+ def _build_pcollection(self, pipeline, **kwargs):
+ """Build the beam pipeline examples for each `SplitGenerator`.
+
+ This function extracts examples from the raw data with parallel transforms
+ in a Beam pipeline. It is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples from the PCollection will be
+ encoded and written to disk.
+ 
+ Warning: When running in a distributed setup, make sure that the data
+ which will be read (download_dir, manual_dir, ...) and written (cache_dir)
+ can be accessed by the worker jobs. The data should be located in a
+ shared filesystem, like GCS.
+ 
+ Args:
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam pipeline.
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs.
+
+ Returns:
+ `beam.PCollection`: Apache Beam PCollection containing the
+ example to send to `self.info.features.encode_example(...)`.
+
+ Example:
+
+ ```
+ def _build_pcollection(pipeline, extracted_dir=None):
+ return (
+ pipeline
+ | beam.Create(gfile.io.listdir(extracted_dir))
+ | beam.Map(_process_file)
+ )
+ ```
+ """
+ raise NotImplementedError()
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ # Create the Beam pipeline and forward it to `_prepare_split`
+ import apache_beam as beam
+
+ import datasets.utils.beam_utils as beam_utils
+
+ beam_runner = self._beam_runner
+ beam_options = self._beam_options
+
+ if not beam_runner and not beam_options:
+ usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
+ raise MissingBeamOptions(
+ "Trying to generate a dataset using Apache Beam, yet no Beam Runner "
+ "or PipelineOptions() has been provided in `load_dataset` or in the "
+ "builder arguments. For big datasets it has to run on large-scale data "
+ "processing tools like Dataflow, Spark, etc. More information about "
+ "Apache Beam runners at "
+ "https://beam.apache.org/documentation/runners/capability-matrix/"
+ "\nIf you really want to run it locally because you feel like the "
+ "Dataset is small enough, you can use the local beam runner called "
+ "`DirectRunner` (you may run out of memory). \nExample of usage: "
+ f"\n\t`{usage_example}`"
+ )
+ if self._writer_batch_size is not None:
+ logger.warning(
+ "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
+ )
+
+ # Beam type checking assumes that a transform's multiple outputs are of the same type,
+ # which is not our case. It also doesn't handle all types correctly, so we
+ # are better off without it.
+ pipeline_options = {"pipeline_type_check": False}
+ if "num_proc" in prepare_splits_kwargs:
+ num_workers = prepare_splits_kwargs.pop("num_proc")
+ pipeline_options["direct_num_workers"] = num_workers
+ pipeline_options["num_workers"] = num_workers
+ pipeline_options["direct_running_mode"] = "multi_processing"
+ # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
+ raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing is not supported yet.")
+ beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
+ # Use a single pipeline for all splits
+ pipeline = beam_utils.BeamPipeline(
+ runner=beam_runner,
+ options=beam_options,
+ )
+ super()._download_and_prepare(
+ dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
+ ) # TODO handle verification_mode in beam datasets
+ # Run pipeline
+ pipeline_results = pipeline.run()
+ pipeline_results.wait_until_finish()
+ metrics = pipeline_results.metrics()
+ # Update `info.splits`.
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ src_fpath = posixpath.join(self._output_dir, src_fname)
+ dst_fpath = posixpath.join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(f"{self._output_dir}/{config.DATASET_INFO_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with xopen(f"{self._output_dir}/{config.LICENSE_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+ "max_shard_size is not supported for Beam datasets. "
+ "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples in filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which build a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+ _ = pipeline | split_name >> _build_pcollection() # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ self._request_info_from_hf_gcs()
+ datasets = {
+ split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
+ for split in self.info.splits.values()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
+
+ def _generate_examples_from_hf_gcs(self, split: SplitInfo):
+ if split.shard_lengths:
+ num_shards = len(split.shard_lengths)
+ remote_prepared_urls = [
+ f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
+ for shard_id in range(num_shards)
+ ]
+ else:
+ remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
+ key = 0
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ for remote_prepared_url in remote_prepared_urls:
+ with xopen(remote_prepared_url, "rb", download_config=download_config) as f:
+ with pa.ipc.open_stream(f) as reader:
+ for record_batch in reader:
+ for record in record_batch.to_pylist():
+ yield key, record
+ key += 1
+
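+    # Reading sketch (illustrative; the shard filename below is hypothetical): each prepared shard
+    # is an Arrow IPC stream, so it can also be opened directly with pyarrow, e.g.
+    #   with open("dataset_name-train-00000-of-00042.arrow", "rb") as f:
+    #       table = pa.ipc.open_stream(f).read_all()
+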
+ def _request_info_from_hf_gcs(self):
+ from .download.streaming_download_manager import xopen
+
+ remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
+ try:
+            download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(remote_dataset_info, download_config=download_config) as f:
+ import json
+
+ _info = json.load(f)
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ self.info.update(DatasetInfo.from_dict(_info))
+
+ @property
+ def _remote_cache_dir_from_hf_gcs(self):
+ relative_data_dir = self._relative_data_dir(with_hash=False)
+ return HF_GCP_BASE_URL + "/" + Path(relative_data_dir).as_posix()
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/config.py b/llmeval-env/lib/python3.10/site-packages/datasets/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..9668dfbd91ef58dd12728cf52044ca03d49a92f6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/config.py
@@ -0,0 +1,272 @@
+import importlib
+import importlib.metadata
+import logging
+import os
+import platform
+from pathlib import Path
+from typing import Optional
+
+from packaging import version
+
+
+logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
+
+# Datasets
+S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
+CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
+REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
+
+# Metrics
+S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
+CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
+REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
+
+# Hub
+HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
+HUB_DEFAULT_VERSION = "main"
+
+PY_VERSION = version.parse(platform.python_version())
+
+# General environment variables accepted values for booleans
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
+
+
+# Imports
+DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
+PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
+PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
+HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
+
+TORCH_VERSION = "N/A"
+TORCH_AVAILABLE = False
+
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
+ if TORCH_AVAILABLE:
+ try:
+ TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling PyTorch because USE_TF is set")
+
+POLARS_VERSION = "N/A"
+POLARS_AVAILABLE = importlib.util.find_spec("polars") is not None
+
+if POLARS_AVAILABLE:
+ try:
+ POLARS_VERSION = version.parse(importlib.metadata.version("polars"))
+ logger.info(f"Polars version {POLARS_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+
+TF_VERSION = "N/A"
+TF_AVAILABLE = False
+
+if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
+ if TF_AVAILABLE:
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+ for package in [
+ "tensorflow",
+ "tensorflow-cpu",
+ "tensorflow-gpu",
+ "tf-nightly",
+ "tf-nightly-cpu",
+ "tf-nightly-gpu",
+ "intel-tensorflow",
+ "tensorflow-rocm",
+ "tensorflow-macos",
+ ]:
+ try:
+ TF_VERSION = version.parse(importlib.metadata.version(package))
+ except importlib.metadata.PackageNotFoundError:
+ continue
+ else:
+ break
+ else:
+ TF_AVAILABLE = False
+ if TF_AVAILABLE:
+ if TF_VERSION.major < 2:
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
+ TF_AVAILABLE = False
+ else:
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
+else:
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+JAX_VERSION = "N/A"
+JAX_AVAILABLE = False
+
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
+ if JAX_AVAILABLE:
+ try:
+ JAX_VERSION = version.parse(importlib.metadata.version("jax"))
+ logger.info(f"JAX version {JAX_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling JAX because USE_JAX is set to False")
+
+
+USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
+BEAM_VERSION = "N/A"
+BEAM_AVAILABLE = False
+if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ try:
+ BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
+ BEAM_AVAILABLE = True
+ logger.info(f"Apache Beam version {BEAM_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling Apache Beam because USE_BEAM is set to False")
+
+
+# Optional tools for data loading
+SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
+
+# Optional tools for feature decoding
+PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
+IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.0.31")
+IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.1.0")
+
+# Optional compression tools
+RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
+ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
+LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
+PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
+
+# Cache location
+DEFAULT_XDG_CACHE_HOME = "~/.cache"
+XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
+DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
+HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
+
+DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
+HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
+
+DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
+HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
+
+DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
+HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
+
+DOWNLOADED_DATASETS_DIR = "downloads"
+DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
+DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
+
+EXTRACTED_DATASETS_DIR = "extracted"
+DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
+EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
+
+# Download count for the website
+HF_UPDATE_DOWNLOAD_COUNTS = (
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
+)
+
+# For downloads and to check remote files metadata
+HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16
+
+# Remote dataset scripts support
+__HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
+HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
+ True
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
+ else False
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
+ else None
+)
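+# Illustrative mapping for the tri-state above (comment sketch):
+#   HF_DATASETS_TRUST_REMOTE_CODE unset or "1" -> True (the default value here is "1")
+#   HF_DATASETS_TRUST_REMOTE_CODE="0"          -> False
+#   any other value                            -> None (left for the caller to resolve)
+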
+TIME_OUT_REMOTE_CODE = 15
+
+# Dataset viewer API
+USE_PARQUET_EXPORT = True
+
+# Batch size constants. For more info, see:
+# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations
+DEFAULT_MAX_BATCH_SIZE = 1000
+
+# Size of the preloaded record batch in `Dataset.__iter__`
+ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
+
+# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
+MAX_SHARD_SIZE = "500MB"
+
+# Parquet configuration
+PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
+
+# Offline mode
+HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+
+# Here, `True` will disable progress bars globally without the possibility of enabling them
+# programmatically. `False` will enable them without the possibility of disabling them.
+# If environment variable is not set (None), then the user is free to enable/disable
+# them programmatically.
+# TL;DR: env variable has priority over code
+__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+ else None
+)
+
+# In-memory
+DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
+IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
+
+# File names
+DATASET_ARROW_FILENAME = "dataset.arrow"
+DATASET_INDICES_FILENAME = "indices.arrow"
+DATASET_STATE_JSON_FILENAME = "state.json"
+DATASET_INFO_FILENAME = "dataset_info.json"
+DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
+LICENSE_FILENAME = "LICENSE"
+METRIC_INFO_FILENAME = "metric_info.json"
+DATASETDICT_JSON_FILENAME = "dataset_dict.json"
+METADATA_CONFIGS_FIELD = "configs"
+REPOCARD_FILENAME = "README.md"
+REPOYAML_FILENAME = ".huggingface.yaml"
+
+MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
+
+MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
+
+# Temporary cache directory prefix
+TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
+
+# Streaming
+STREAMING_READ_MAX_RETRIES = 20
+STREAMING_READ_RETRY_INTERVAL = 5
+
+# Datasets without script
+DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
+ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+
+# Progress bars
+PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
+
+# Maximum number of uploaded files per commit
+UPLOADS_MAX_NUMBER_PER_COMMIT = 50
+
+# Backward compatibility
+MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/distributed.py b/llmeval-env/lib/python3.10/site-packages/datasets/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..e036fabaf2cf6231ae6a3ca2c443100ccbb0b4d5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/distributed.py
@@ -0,0 +1,39 @@
+from typing import TypeVar
+
+from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
+from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ For map-style datasets:
+
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ For iterable datasets:
+
+    If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+    then the shards are evenly assigned across the nodes, which is the most efficient option.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`Dataset`] or [`IterableDataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
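+
+    Example (an illustrative sketch; `ds` stands for any already loaded [`Dataset`] or [`IterableDataset`]):
+
+    ```py
+    >>> from datasets.distributed import split_dataset_by_node
+    >>> ds_rank0 = split_dataset_by_node(ds, rank=0, world_size=8)
+    ```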
+ """
+ if isinstance(dataset, Dataset):
+ return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+ else:
+ return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/inspect.py b/llmeval-env/lib/python3.10/site-packages/datasets/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6178b52d5af912799d952106ca81d9ed54f8299
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/inspect.py
@@ -0,0 +1,582 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""List and inspect datasets."""
+
+import inspect
+import os
+import shutil
+import warnings
+from pathlib import Path, PurePath
+from typing import Dict, List, Mapping, Optional, Sequence, Union
+
+import huggingface_hub
+
+from . import config
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager
+from .info import DatasetInfo
+from .load import (
+ dataset_module_factory,
+ get_dataset_builder_class,
+ import_main_class,
+ load_dataset_builder,
+ metric_module_factory,
+)
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import relative_to_absolute_path
+from .utils.logging import get_logger
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+
+class SplitsNotFoundError(ValueError):
+ pass
+
+
+@deprecated("Use 'huggingface_hub.list_datasets' instead.")
+def list_datasets(with_community_datasets=True, with_details=False):
+ """List all the datasets scripts available on the Hugging Face Hub.
+
+ Args:
+ with_community_datasets (`bool`, *optional*, defaults to `True`):
+ Include the community provided datasets.
+ with_details (`bool`, *optional*, defaults to `False`):
+ Return the full details on the datasets instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_datasets
+ >>> list_datasets()
+ ['acronym_identification',
+ 'ade_corpus_v2',
+ 'adversarial_qa',
+ 'aeslc',
+ 'afrikaans_ner_corpus',
+ 'ag_news',
+ ...
+ ]
+ ```
+ """
+ datasets = huggingface_hub.list_datasets(full=with_details)
+ if not with_community_datasets:
+ datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+ if not with_details:
+ datasets = [dataset.id for dataset in datasets]
+ return list(datasets)
+
+
+@deprecated(
+ "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def list_metrics(with_community_metrics=True, with_details=False):
+ """List all the metrics script available on the Hugging Face Hub.
+
+
+
+ Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+ with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_metrics
+ >>> list_metrics()
+ ['accuracy',
+ 'bertscore',
+ 'bleu',
+ 'bleurt',
+ 'cer',
+ 'chrf',
+ ...
+ ]
+ ```
+ """
+ metrics = huggingface_hub.list_metrics()
+ if not with_community_metrics:
+ metrics = [metric for metric in metrics if "/" not in metric.id]
+ if not with_details:
+ metrics = [metric.id for metric in metrics]
+ return metrics
+
+
+@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ """
+    Allow inspection/modification of a dataset script by copying it on local drive at local_path.
+
+ Args:
+ path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name
+ as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+ local_path (`str`):
+ Path to the local folder to copy the dataset script to.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ **download_kwargs (additional keyword arguments):
+ Optional arguments for [`DownloadConfig`] which will override
+ the attributes of `download_config` if supplied.
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ if os.path.isfile(path):
+ path = str(Path(path).parent)
+ if os.path.isdir(path):
+ shutil.copytree(path, local_path, dirs_exist_ok=True)
+ else:
+ huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
+ repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
+ )
+ print(
+ f"The dataset {path} can be inspected at {local_path}. "
+        f'You can modify this loading script, if it has one, and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+@deprecated(
+ "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ r"""
+ Allow inspection/modification of a metric script by copying it on local drive at local_path.
+
+
+
+    Use `evaluate.inspect_evaluation_module` from the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+        local_path (``str``): path to the local folder to copy the metric script to.
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
+ """
+ metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
+ metric_cls = import_main_class(metric_module.module_path, dataset=False)
+ module_source_path = inspect.getsourcefile(metric_cls)
+ module_source_dirpath = os.path.dirname(module_source_path)
+ for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
+ dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
+ os.makedirs(dst_dirpath, exist_ok=True)
+ # skipping hidden directories; prune the search
+ dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
+ for filename in filenames:
+ shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
+ shutil.copystat(dirpath, dst_dirpath)
+ local_path = relative_to_absolute_path(local_path)
+ print(
+ f"The processing scripts for metric {path} can be inspected at {local_path}. "
+ f"The main class is in {module_source_dirpath}. "
+        f'You can modify this processing script and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+def get_dataset_infos(
+ path: str,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+            e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_infos
+ >>> get_dataset_infos('rotten_tomatoes')
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ config_names = get_dataset_config_names(
+ path=path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_files=data_files,
+ token=token,
+ )
+ return {
+ config_name: get_dataset_config_info(
+ path=path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ for config_name in config_names
+ }
+
+
+def get_dataset_config_names(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+):
+ """Get the list of available config names for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_config_names
+ >>> get_dataset_config_names("glue")
+ ['cola',
+ 'sst2',
+ 'mrpc',
+ 'qqp',
+ 'stsb',
+ 'mnli',
+ 'mnli_mismatched',
+ 'mnli_matched',
+ 'qnli',
+ 'rte',
+ 'wnli',
+ 'ax']
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ return list(builder_cls.builder_configs.keys()) or [
+ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
+ ]
+
+
+def get_dataset_default_config_name(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+) -> Optional[str]:
+ """Get the default config name for a particular dataset.
+ Can return None only if the dataset has multiple configurations and no default configuration.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Returns:
+ Optional[str]: the default config name if there is one
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_default_config_name
+ >>> get_dataset_default_config_name("openbookqa")
+ 'main'
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ builder_configs = list(builder_cls.builder_configs.keys())
+ if builder_configs:
+ default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
+ else:
+ default_config_name = "default"
+ return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
+
+
+def get_dataset_config_info(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+) -> DatasetInfo:
+ """Get the meta information (DatasetInfo) about a dataset for a particular config
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+ config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
+ data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
+ download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
+
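+    Example (a minimal sketch; the dataset and config names are only illustrations):
+
+    ```py
+    >>> from datasets import get_dataset_config_info
+    >>> get_dataset_config_info("rotten_tomatoes", config_name="default")
+    DatasetInfo(...)
+    ```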
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ builder = load_dataset_builder(
+ path,
+ name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ info = builder.info
+ if info.splits is None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ if token is not None:
+ download_config.token = token
+ builder._check_manual_download(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ try:
+ info.splits = {
+ split_generator.name: {"name": split_generator.name, "dataset_name": path}
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ }
+ except Exception as err:
+ raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
+ return info
+
+
+def get_dataset_split_names(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the list of available splits for a particular config and dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ config_name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_split_names
+ >>> get_dataset_split_names('rotten_tomatoes')
+ ['train', 'validation', 'test']
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ info = get_dataset_config_info(
+ path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ return list(info.splits.keys())
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/iterable_dataset.py b/llmeval-env/lib/python3.10/site-packages/datasets/iterable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab03b4f486a8a66ed2be6bf53c7444cd08b32494
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/iterable_dataset.py
@@ -0,0 +1,2389 @@
+import copy
+import itertools
+import sys
+import warnings
+from collections import Counter
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from itertools import cycle, islice
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+
+import fsspec.asyn
+import numpy as np
+import pyarrow as pa
+
+from . import config
+from .arrow_dataset import Dataset, DatasetInfoMixin
+from .features import Features
+from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
+from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
+from .info import DatasetInfo
+from .splits import NamedSplit
+from .table import cast_table_to_features, read_schema_from_file, table_cast
+from .utils.logging import get_logger
+from .utils.py_utils import Literal
+from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
+
+
+logger = get_logger(__name__)
+
+Key = Union[int, str]
+
+
+def identity_func(x):
+ return x
+
+
+def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
+ if any(col not in example for col in column_mapping):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+ )
+ if any(col in example for col in column_mapping.values()):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
+ )
+ return {
+ new_column_name: example[original_column_name]
+ for original_column_name, new_column_name in column_mapping.items()
+ }
+
+
+def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
+ if name in example:
+ raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+ return {name: column[idx]}
+
+
+def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
+ pa_table = pa.Table.from_pydict(batch)
+ if try_features is not None:
+ try:
+ pa_table = table_cast(pa_table, pa.schema(try_features.type))
+ except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
+ pass
+ return Features.from_arrow_schema(pa_table.schema)
+
+
+def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
+ # we order the columns by order of appearance
+ # to do so, we use a dict as an ordered set
+ cols = {col: None for example in examples for col in example}
+ # when an example is missing a column, we set the value to None with .get()
+ arrays = [[example.get(col) for example in examples] for col in cols]
+ return dict(zip(cols, arrays))
+
+
+def _batch_to_examples(batch: Dict[str, list]) -> List[Dict[str, Any]]:
+ """Convert a batch (dict of examples) to examples list"""
+ n_examples = len(batch[next(iter(batch))])
+ for i in range(n_examples):
+ yield {col: array[i] for col, array in batch.items()}
+
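+# Round-trip sketch for the two helpers above (illustrative; values chosen arbitrarily):
+#   _examples_to_batch([{"a": 1, "b": 2}, {"a": 3}])          -> {"a": [1, 3], "b": [2, None]}
+#   list(_batch_to_examples({"a": [1, 3], "b": [2, None]}))   -> [{"a": 1, "b": 2}, {"a": 3, "b": None}]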
+
+class _HasNextIterator(Iterator):
+ """Iterator with an hasnext() function. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
+
+ def __init__(self, it):
+ self.it = iter(it)
+ self._hasnext = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._hasnext:
+ result = self._thenext
+ else:
+ result = next(self.it)
+ self._hasnext = None
+ return result
+
+ def hasnext(self):
+ if self._hasnext is None:
+ try:
+ self._thenext = next(self.it)
+ except StopIteration:
+ self._hasnext = False
+ else:
+ self._hasnext = True
+ return self._hasnext
+
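+# Usage sketch for _HasNextIterator (illustrative):
+#   it = _HasNextIterator(range(2))
+#   it.hasnext()  # True: prefetches the next element so exhaustion can be tested without losing it
+#   next(it)      # 0: returns the prefetched element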
+
+def _convert_to_arrow(
+ iterable: Iterable[Tuple[Key, dict]],
+    batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Convert and group examples in Arrow tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, dict]]`):
+ An examples iterable containing tuples (example_key, example) of type (int/str, dict)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
+ """
+ if batch_size is None or batch_size <= 0:
+ yield (
+ "all",
+ pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)),
+ )
+ return
+ iterator = iter(iterable)
+ for key, example in iterator:
+ iterator_batch = islice(iterator, batch_size - 1)
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ if len(key_examples_list) < batch_size and drop_last_batch:
+ return
+ keys, examples = zip(*key_examples_list)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
+
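+# Grouping sketch for _convert_to_arrow (illustrative): with
+#   examples = [(i, {"x": i}) for i in range(5)]
+# list(_convert_to_arrow(examples, batch_size=2)) yields keys "0_1", "2_3" and "4", paired with
+# Arrow tables of 2, 2 and 1 rows respectively (the smaller final batch is kept unless
+# drop_last_batch=True).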
+
+def _batch_arrow_tables(
+ iterable: Iterable[Tuple[Key, pa.Table]],
+ batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, pa.Table]]`):
+ A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
+ """
+ if batch_size is None or batch_size <= 0:
+ yield "all", pa.concat_tables([pa_table for _, pa_table in iterable])
+ return
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for key, pa_table in iterable:
+ for chunk in pa_table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+
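+# Re-chunking sketch for _batch_arrow_tables (illustrative): given two 3-row tables
+#   tables = [("a", pa.table({"x": [0, 1, 2]})), ("b", pa.table({"x": [3, 4, 5]}))]
+# list(_batch_arrow_tables(tables, batch_size=4)) first yields a 4-row table (key "a_b[:1]")
+# and then the 2-row remainder (key "b[1:]"), since the last batch is kept when
+# drop_last_batch is False.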
+
+class _BaseExamplesIterable:
+ """Base class for the examples iterable used by an IterableDataset"""
+
+ def __init__(self) -> None:
+ self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None
+
+ def __iter__(self) -> Iterator[Tuple[Key, dict]]:
+ """An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
+ raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
+ """
+ Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
+ If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
+ """
+ raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
+
+ def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]:
+ return list(range(worker_id, self.n_shards, num_workers))
+
+ @property
+ def n_shards(self) -> int:
+ raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
+
+
+class ExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
+ super().__init__()
+ self.generate_examples_fn = generate_examples_fn
+ self.kwargs = kwargs
+
+ def __iter__(self):
+ yield from self.generate_examples_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
+ return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
+ def __init__(
+ self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
+ ):
+ super().__init__(generate_examples_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class ArrowExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
+ super().__init__()
+ self.generate_tables_fn = generate_tables_fn
+ self.kwargs = kwargs
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**self.kwargs):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ yield from self.generate_tables_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
+ return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
+ def __init__(
+ self,
+ generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
+ kwargs: dict,
+ generator: np.random.Generator,
+ ):
+ super().__init__(generate_tables_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_tables_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class SelectColumnsIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.column_names = column_names
+ if self.ex_iterable.iter_arrow:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for idx, row in self.ex_iterable:
+ yield idx, {c: row[c] for c in self.column_names}
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ for idx, pa_table in self.ex_iterable.iter_arrow():
+ yield idx, pa_table.select(self.column_names)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names)
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class StepExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.step = step
+ self.offset = offset
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterator = iter(self.ex_iterable)
+ while True:
+ batch = list(islice(ex_iterator, self.step))
+ if len(batch) > self.offset:
+ yield batch[self.offset]
+ else:
+ break
+
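+    # Behaviour sketch (illustrative): with step=3 and offset=1, iterating over examples
+    # e0, e1, e2, e3, e4, ... yields e1, e4, ... i.e. one example out of every `step`,
+    # starting at position `offset`; a final incomplete group is dropped if it has no
+    # element at position `offset`.
+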
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ self.stopping_strategy = stopping_strategy
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
+ # TODO(QL): implement iter_arrow
+
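+    # Behaviour sketch (illustrative): with two sources A = [a0, a1] and B = [b0, b1, b2],
+    # "first_exhausted" alternates a0, b0, a1 and then stops because A is exhausted, while
+    # "all_exhausted" keeps cycling (re-opening exhausted sources) until every source has been
+    # fully visited at least once.
+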
+ def _get_indices_iterator(self):
+ # this is an infinite iterator to keep track of which iterator we want to pick examples from
+ return cycle(range(len(self.ex_iterables)))
+
+ def __iter__(self):
+ iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
+
+ indices_iterator = self._get_indices_iterator()
+
+ is_exhausted = np.full(len(self.ex_iterables), False)
+ for i in indices_iterator:
+ try: # let's pick one example from the iterator at index i
+ yield next(iterators[i])
+
+ # it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
+ if not iterators[i].hasnext():
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criteria is met, break the main for loop
+ break
+ # otherwise reinitialise the iterator and yield the first example
+ iterators[i] = _HasNextIterator(self.ex_iterables[i])
+
+ except StopIteration:
+                # here it means that the i-th iterable dataset is empty, i.e. we never have the occasion to yield an element of the i-th dataset.
+                # we still check whether the stopping criterion is met and break out of the loop if so (this matters for the oversampling strategy)
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criteria is met, break the main for loop
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
+ """Shuffle each underlying examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return CyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ stopping_strategy=self.stopping_strategy,
+ )
+
+
+class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
+ It doesn't require the examples iterables to always yield the same columns.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+    For reference, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables):
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable
+
+ def _iter_arrow(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable.iter_arrow()
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Shuffle the list of examples iterable, as well as each underlying examples iterable."""
+ rng = deepcopy(generator)
+ ex_iterables = list(self.ex_iterables)
+ rng.shuffle(ex_iterables)
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(
+ f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
+ )
+
+
+class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
+ It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
+ This check is done once when yielding the first example.
+
+ However it doesn't fill missing columns with None.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
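+ # e.g. an example {"text": "..."} from one source and {"label": 1} from another are merged
+ # into {"text": "...", "label": 1}, under a combined key such as "0_0"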
+ ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
+ for i in itertools.count():
+ keys = []
+ examples = []
+ for ex_iterator in list(ex_iterators):
+ try:
+ key, example = next(ex_iterator)
+ keys.append(key)
+ examples.append(example)
+ except StopIteration:
+ ex_iterators.remove(ex_iterator)
+ if ex_iterators:
+ if i == 0:
+ _check_column_names([column_name for example in examples for column_name in example])
+ new_example = {}
+ for example in examples:
+ new_example.update(example)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, new_example
+ else:
+ break
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return 1
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return HorizontallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ generator: np.random.Generator,
+ probabilities: Optional[List[float]] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__(ex_iterables, stopping_strategy)
+ self.generator = deepcopy(generator)
+ self.probabilities = probabilities
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(
+ rng: np.random.Generator,
+ num_sources: int,
+ random_batch_size=1000,
+ p: Optional[List[float]] = None,
+ ) -> Iterator[int]:
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ if p is None:
+ while True:
+ yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
+ else:
+ while True:
+ yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
+
+ def _get_indices_iterator(self):
+ rng = deepcopy(self.generator)
+ # this is an infinite iterator that randomly samples the index of the source to pick examples from
+ return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Shuffle the data sources of each wrapped examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables,
+ generator=generator,
+ probabilities=self.probabilities,
+ stopping_strategy=self.stopping_strategy,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ self.generator,
+ self.probabilities,
+ self.stopping_strategy,
+ )
+
+
+class MappedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.drop_last_batch = drop_last_batch
+ self.remove_columns = remove_columns
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch; if `batch_size` is None or <= 0, then the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ if (
+ self.drop_last_batch
+ and self.batch_size is not None
+ and self.batch_size > 0
+ and len(examples) < self.batch_size
+ ): # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then apply the transform
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ transformed_batch = dict(batch) # this will be updated with the function output
+ transformed_batch.update(self.function(*function_args, **self.fn_kwargs))
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_batch[c]
+ if transformed_batch:
+ first_col = next(iter(transformed_batch))
+ bad_cols = [
+ col
+ for col in transformed_batch
+ if len(transformed_batch[col]) != len(transformed_batch[first_col])
+ ]
+ if bad_cols:
+ raise ValueError(
+ f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
+ )
+ # the new key is the concatenation of the examples keys from the batch
+ new_key = "_".join(str(key) for key in keys)
+ # yield one example at a time from the transformed batch
+ for example in _batch_to_examples(transformed_batch):
+ yield new_key, example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the transform and yield the example directly
+ # first copy the example, since we might drop some keys
+ example = dict(example)
+ example = format_dict(example) if format_dict else example
+ # then apply the transform
+ inputs = example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ transformed_example = dict(example) # this will be updated with the function output
+ transformed_example.update(self.function(*function_args, **self.fn_kwargs))
+ # then we remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_example[c]
+ yield key, transformed_example
+ current_idx += 1
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(),
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ else:
+ iterator = _convert_to_arrow(
+ self.ex_iterable,
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ output_table = self.function(*function_args, **self.fn_kwargs)
+ if not isinstance(output_table, pa.Table):
+ raise TypeError(
+ f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset."
+ )
+ # we don't need to merge results for consistency with Dataset.map, which merges them only when both input and output are dicts
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for column in self.remove_columns:
+ if column in output_table.column_names:
+ output_table = output_table.remove_column(output_table.column_names.index(column))
+ # return output
+ yield key, output_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
+ """Keep only the requested shard."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class FilteredExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+ if self.batched:
+ for key, example in iterator:
+ # If `batched`, first build the batch; if `batch_size` is None or <= 0, then the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then compute the mask for the batch
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield one example at a time from the batch
+ for key_example, to_keep in zip(key_examples_list, mask):
+ if to_keep:
+ yield key_example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the filtering function directly
+ example = dict(example)
+ inputs = format_dict(example) if format_dict else example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ to_keep = self.function(*function_args, **self.fn_kwargs)
+ if to_keep:
+ yield key, example
+ current_idx += 1
+
+ def _iter_arrow(self):
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
+ )
+ else:
+ iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield the filtered table
+ if self.batched:
+ yield key, pa_table.filter(mask)
+ elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
+ yield key, pa_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, seed: Optional[int]) -> "FilteredExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(seed),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
+ """Keep only the requested shard."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class BufferShuffledExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.buffer_size = buffer_size
+ self.generator = generator
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
+ while True:
+ yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
+
+ def __iter__(self):
+ buffer_size = self.buffer_size
+ rng = deepcopy(self.generator)
+ indices_iterator = self._iter_random_indices(rng, buffer_size)
+ # this is the shuffle buffer that we keep in memory
+ mem_buffer = []
+ for x in self.ex_iterable:
+ if len(mem_buffer) == buffer_size: # if the buffer is full, pick an example from it
+ i = next(indices_iterator)
+ yield mem_buffer[i]
+ mem_buffer[i] = x # replace the picked example by a new one
+ else: # otherwise, keep filling the buffer
+ mem_buffer.append(x)
+ # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
+ rng.shuffle(mem_buffer)
+ yield from mem_buffer
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
+ """Shuffle the wrapped examples iterable as well as the shuffling buffer."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
+ """Keep only the requested shard."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ buffer_size=self.buffer_size,
+ generator=self.generator,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class SkipExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n, None)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class TakeExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
+ return self
+
+ @staticmethod
+ def split_number(num, n):
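+ # e.g. split_number(10, 3) -> [4, 3, 3]: the remainder is spread over the first workers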
+ quotient = num // n
+ remainder = num % n
+ result = [quotient] * n
+ for i in range(remainder):
+ result[i] += 1
+ return result
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable":
+ """Keep only the requested shard."""
+ return TakeExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ n=self.split_number(self.n, num_workers)[worker_id],
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+def _apply_feature_types_on_example(
+ example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ example = dict(example)
+ # add missing columns
+ for column_name in features:
+ if column_name not in example:
+ example[column_name] = None
+ # we encode the example for ClassLabel feature types for example
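+ # e.g. with Features({"label": ClassLabel(names=["neg", "pos"])}), {"label": "pos"} is encoded to {"label": 1}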
+ encoded_example = features.encode_example(example)
+ # Decode example for Audio feature, e.g.
+ decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
+ return decoded_example
+
+
+def _apply_feature_types_on_batch(
+ batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ batch = dict(batch)
+ # add missing columns
+ n_examples = len(batch[next(iter(batch))])
+ for column_name in features:
+ if column_name not in batch:
+ batch[column_name] = [None] * n_examples
+ # we encode the batch for ClassLabel feature types for example
+ encoded_batch = features.encode_batch(batch)
+ # Decode batch for Audio feature, e.g.
+ decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id)
+ return decoded_batch
+
+
+class TypedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ features: Features,
+ token_per_repo_id: Dict[str, Union[str, bool, None]],
+ ):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.features = features
+ self.token_per_repo_id = token_per_repo_id
+ if self.ex_iterable.iter_arrow is not None:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ # Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ for key, example in self.ex_iterable:
+ yield (
+ key,
+ _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id),
+ )
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ schema = self.features.arrow_schema
+ for key, pa_table in self.ex_iterable.iter_arrow():
+ columns = set(pa_table.column_names)
+ # add missing columns
+ for column_name in self.features:
+ if column_name not in columns:
+ col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
+ pa_table = pa_table.append_column(column_name, col)
+ if pa_table.schema != schema:
+ pa_table = cast_table_to_features(pa_table, self.features)
+ yield key, pa_table
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable":
+ """Keep only the requested shard."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+@dataclass
+class FormattingConfig:
+ format_type: Optional[str]
+
+ def __post_init__(self):
+ if self.format_type == "pandas":
+ raise NotImplementedError(
+ "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
+ )
+
+
+@dataclass
+class ShufflingConfig:
+ generator: np.random.Generator
+ _original_seed: Optional[int] = None
+
+
+@dataclass
+class DistributedConfig:
+ rank: int
+ world_size: int
+
+
+def _maybe_add_torch_iterable_dataset_parent_class(cls):
+ """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available"""
+ if config.TORCH_AVAILABLE:
+ import torch.utils.data
+
+ if torch.utils.data.IterableDataset not in cls.__bases__:
+ cls.__bases__ += (torch.utils.data.IterableDataset,)
+
+
+class IterableDataset(DatasetInfoMixin):
+ """A Dataset backed by an iterable."""
+
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ formatting: Optional[FormattingConfig] = None,
+ shuffling: Optional[ShufflingConfig] = None,
+ distributed: Optional[DistributedConfig] = None,
+ token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
+ format_type="deprecated",
+ ):
+ if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
+ raise RuntimeError(
+ "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
+ "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
+ )
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+
+ self._ex_iterable = ex_iterable
+ self._formatting = formatting
+ self._shuffling = shuffling
+ self._distributed = distributed
+ self._epoch = 0
+ self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def __repr__(self):
+ return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})"
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def _head(self, n=5):
+ return _examples_to_batch(list(self.take(n)))
+
+ def _effective_generator(self):
+ if self._shuffling and self._epoch == 0:
+ return self._shuffling.generator
+ elif self._shuffling:
+ # Create the effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
+ effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
+ effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
+ return np.random.default_rng(effective_seed)
+ else:
+ raise ValueError("This dataset is not shuffled")
+
+ @property
+ def n_shards(self) -> int:
+ if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
+ return self._ex_iterable.n_shards // self._distributed.world_size
+ return self._ex_iterable.n_shards
+
+ def _iter_pytorch(self):
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ # Fix for fsspec when using multiprocess to avoid hanging in the ML training loop. (only required for fsspec >= 0.9.0)
+ # See https://github.com/fsspec/gcsfs/issues/379
+ fsspec.asyn.reset_lock()
+ # check if there aren't too many workers
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
+ logger.warning(
+ f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
+ f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
+ )
+ logger.info(
+ f"To parallelize data loading, we give each process some shards (or data sources) to process. "
+ f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
+ f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
+ )
+ # split workload
+ _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
+ shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
+ if shards_indices:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+ else:
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ else:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
+ )
+
+ def _is_main_process(self):
+ if self._distributed and self._distributed.rank > 0:
+ return False
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if worker_info is not None and worker_info.id > 0:
+ return False
+ return True
+
+ def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
+ if self._shuffling:
+ ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
+ else:
+ ex_iterable = self._ex_iterable
+
+ if self._distributed:
+ rank = self._distributed.rank
+ world_size = self._distributed.world_size
+ if ex_iterable.n_shards % world_size == 0:
+ if self._is_main_process():
+ n_shards_per_node = ex_iterable.n_shards // world_size
+ plural = "s" if n_shards_per_node > 1 else ""
+ logger.info(
+ f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
+ else:
+ if self._is_main_process():
+ logger.info(
+ f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
+ )
+ logger.info(
+ f"It is more optimized to distribute the dataset shards (or data sources) across nodes. "
+ f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. "
+ f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}"
+ )
+ ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
+
+ return ex_iterable
+
+ def __iter__(self):
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
+ # We're a torch.utils.data.IterableDataset in a PyTorch worker process
+ yield from self._iter_pytorch()
+ return
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ Args:
+ batch_size (`int`):
+ Size of each batch to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be dropped.
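+
+ Example (illustrative sketch; actual values depend on the dataset):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> batch = next(iter(ds.iter(batch_size=2)))
+ >>> len(batch["text"]), batch["label"]
+ (2, [1, 1])
+ ```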
+ """
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch
+ )
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
+ for key, pa_table in iterator:
+ yield formatter.format_batch(pa_table)
+ return
+
+ iterator = iter(ex_iterable)
+ for key, example in iterator:
+ # If batched, first build the batch
+ examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
+ if drop_last_batch and len(examples) < batch_size: # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_batch`.
+ batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
+ yield format_dict(batch) if format_dict else batch
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ gen_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Create an Iterable Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features (`Features`, *optional*):
+ Dataset features.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
+ This can be used to improve shuffling and when iterating over the dataset with multiple workers.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = IterableDataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
+ >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ gen_kwargs=gen_kwargs,
+ streaming=True,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ **kwargs,
+ ) -> "IterableDataset":
+ """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+
+ Returns:
+ [`IterableDataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ >>> columns=["id", "name"],
+ >>> )
+ >>> ds = IterableDataset.from_spark(df)
+ ```
+ """
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=True,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_file(filename: str) -> "IterableDataset":
+ """Instantiate a IterableDataset from Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+
+ Returns:
+ [`IterableDataset`]
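+
+ Example (illustrative sketch; the path below is hypothetical):
+
+ ```py
+ >>> ds = IterableDataset.from_file("path/to/dataset.arrow")
+ ```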
+ """
+ pa_table_schema = read_schema_from_file(filename)
+ inferred_features = Features.from_arrow_schema(pa_table_schema)
+ ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
+ return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDataset":
+ """
+ Return a dataset with the specified format.
+ Supported formats: "arrow", or None for regular python objects.
+ The other formats are currently not implemented.
+
+ Args:
+ type (`str`, *optional*, defaults to `None`):
+ If set to "torch", the returned dataset will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
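+
+ Example (illustrative sketch; with the "arrow" format each example should come back as a `pyarrow.Table`):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> import pyarrow as pa
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.with_format("arrow")
+ >>> isinstance(next(iter(ds)), pa.Table)
+ True
+ ```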
+ """
+ type = get_format_type_from_alias(type)
+ # TODO(QL): add format_kwargs
+ # TODO(QL): add format_columns and return_all_columns
+ # TODO(QL): add pandas format
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=FormattingConfig(format_type=type),
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ features: Optional[Features] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, the full dataset is provided as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`List[str]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ features (`Features`, *optional*, defaults to `None`):
+ Feature types of the resulting dataset.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+ if function is None:
+ function = identity_func
+ if fn_kwargs is None:
+ fn_kwargs = {}
+ ex_iterable = MappedExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, default `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds.take(3))
+ [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ # TODO(QL): keep the features (right now, keeping them would call decode_example again on an already decoded example)
+ info = copy.deepcopy(self._info)
+ info.features = None
+
+ # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+ ex_iterable = FilteredExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDataset":
+ """
+ Randomly shuffles the elements of this dataset.
+
+ This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> list(shuffled_ds.take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ if generator is None:
+ generator = np.random.default_rng(seed)
+ else:
+ generator = deepcopy(generator)
+ shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
+ return IterableDataset(
+ ex_iterable=BufferShuffledExamplesIterable(
+ self._ex_iterable, buffer_size=buffer_size, generator=generator
+ ).shuffle_data_sources(generator),
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=shuffling,
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def set_epoch(self, epoch: int):
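+ """Set the current epoch.
+
+ When the dataset is shuffled, the epoch is combined with the shuffling seed to derive a new
+ effective seed (see `_effective_generator`), so that the shuffle order changes at each epoch.
+ """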
+ self._epoch = epoch
+
+ def skip(self, n: int) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def take(self, n: int) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ @property
+ def column_names(self) -> Optional[List[str]]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return list(self._info.features.keys()) if self._info.features is not None else None
+
+ def add_column(self, name: str, column: Union[list, np.ndarray]) -> "IterableDataset":
+ """Add a column to the dataset.
+
+ Args:
+ name (str): Column name.
+ column (list or np.array): Column data to be added.
+
+ Returns:
+ `IterableDataset`
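+
+ Example (illustrative sketch; the column is expected to provide one value per example):
+
+ ```py
+ >>> from datasets import IterableDataset
+ >>> def gen():
+ ... yield {"text": "Good"}
+ ... yield {"text": "Bad"}
+ >>> ds = IterableDataset.from_generator(gen)
+ >>> ds = ds.add_column("label", [0, 1])
+ >>> list(ds)
+ [{'text': 'Good', 'label': 0}, {'text': 'Bad', 'label': 1}]
+ ```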
+ """
+ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return self.rename_columns({original_column_name: new_column_name})
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with renamed columns
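+
+ Example (illustrative sketch, mirroring the `rename_column` example above):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> sorted(next(iter(ds)))
+ ['movie_review', 'rating']
+ ```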
+ """
+
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(
+ partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
+ )
+ if original_features is not None:
+ ds_iterable._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping.keys() else col: feature
+ for col, feature in original_features.items()
+ }
+ )
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+ return ds_iterable
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(remove_columns=column_names)
+ if original_features is not None:
+ ds_iterable._info.features = original_features.copy()
+ for col, _ in original_features.items():
+ if col in column_names:
+ del ds_iterable._info.features[col]
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+
+ return ds_iterable
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to select.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object with selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ if self._info:
+ info = copy.deepcopy(self._info)
+ if self._info.features is not None:
+ missing_columns = set(column_names) - set(self._info.features.keys())
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Columns in the dataset: "
+ f"{list(self._info.features.keys())}."
+ )
+ info.features = Features({c: info.features[c] for c in column_names})
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+
+ ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=self._shuffling,
+ distributed=self._distributed,
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`Feature`):
+ Target feature.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
+ >>> ds.features
+ {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds.features
+ {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features[column] = feature
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features["label"] = ClassLabel(names=["bad", "good"])
+ >>> new_features["text"] = Value("large_string")
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features = features
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _step(self, step: int, offset: int) -> "IterableDataset":
+ ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _resolve_features(self):
+ if self.features is not None:
+ return self
+ elif isinstance(self._ex_iterable, TypedExamplesIterable):
+ features = self._ex_iterable.features
+ else:
+ features = _infer_features_from_batch(self.with_format(None)._head())
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+
+def _concatenate_iterable_datasets(
+ dsets: List[IterableDataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> IterableDataset:
+ """
+ Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`.
+ Missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
+ ```
+ """
+ dsets = [d._resolve_features() for d in dsets]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ _check_column_names([col_name for dset in dsets for col_name in dset.features])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in dsets]
+ if axis == 0:
+ ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ else:
+ ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in dsets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
+ # Return the new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _interleave_iterable_datasets(
+ datasets: List[IterableDataset],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> IterableDataset:
+ """
+ Interleave several iterable datasets (sources) into a single iterable dataset.
+ The new iterable dataset alternates between the sources to yield examples.
+ If `probabilities = None` (default), the iterable dataset cycles through the sources in order for each example in the iteration.
+ If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each example in the iteration.
+
+ Args:
+ datasets (`List[IterableDataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets * nb_dataset` samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+
+ Output:
+ `datasets.IterableDataset`
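+
+ Example (illustrative sketch; `ds1` and `ds2` are assumed to be `IterableDataset` objects with alignable features):
+
+ ```py
+ >>> ds3 = _interleave_iterable_datasets([ds1, ds2], probabilities=[0.8, 0.2], seed=42)
+ ```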
+ """
+ datasets = [d._resolve_features() for d in datasets]
+
+ # Perform checks
+ _check_if_features_can_be_aligned([dset.features for dset in datasets])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in datasets]
+
+ # Use cycling or random cycling of sources
+ if probabilities is None:
+ ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
+ else:
+ generator = np.random.default_rng(seed)
+ ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
+ )
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in datasets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {
+ repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
+ }
+ # Return the new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
+
+
+def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
+ """
+ Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most efficient.
+ Otherwise, each node keeps 1 example out of every `world_size` examples, skipping the others.
+
+ Args:
+ dataset ([`IterableDataset`]):
+ The iterable dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
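+
+ Example (illustrative sketch; `ds` is an assumed `IterableDataset` and the current node has rank 0 in a pool of 4 nodes):
+
+ ```py
+ >>> ds_rank0 = _split_by_node_iterable_dataset(ds, rank=0, world_size=4)
+ ```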
+ """
+ if dataset._distributed:
+ world_size = world_size * dataset._distributed.world_size
+ rank = world_size * dataset._distributed.rank + rank
+ distributed = DistributedConfig(rank=rank, world_size=world_size)
+ return IterableDataset(
+ ex_iterable=dataset._ex_iterable,
+ info=dataset._info.copy(),
+ split=dataset._split,
+ formatting=dataset._formatting,
+ shuffling=copy.deepcopy(dataset._shuffling),
+ distributed=distributed,
+ token_per_repo_id=dataset._token_per_repo_id,
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/keyhash.py b/llmeval-env/lib/python3.10/site-packages/datasets/keyhash.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c75fcfd7ffb300aac1ffd0fc822287f21b56f8a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/keyhash.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+"""
+Hashing function for dataset keys using `hashlib.md5`
+
+Requirements for the hash function:
+
+ - Provides a uniformly distributed hash output
+ - Is adequately fast
+ - Works with multiple input types (in this case, `str`, `int` or `bytes`)
+ - Is platform independent (generates the same hash on different operating systems and architectures)
+
+ The hashing function provides a unique 128-bit integer hash of the provided key.
+
+ The split name is used here as the hash salt to avoid having the same hashes
+ in different splits for the same keys.
+"""
+
+from typing import Union
+
+from huggingface_hub.utils import insecure_hashlib
+
+
+def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+ """
+ Returns the input hash_data in its bytes form
+
+ Args:
+ hash_data: the hash salt/key to be converted to bytes
+ """
+ if isinstance(hash_data, bytes):
+ # Data is already in bytes, return it as is
+ return hash_data
+ elif isinstance(hash_data, str):
+ # We keep the data as is, for it to be encoded to UTF-8 later
+ # However, replace `\\` with `/` for Windows compatibility
+ hash_data = hash_data.replace("\\", "/")
+ elif isinstance(hash_data, int):
+ hash_data = str(hash_data)
+ else:
+ # If data is not of the required type, raise error
+ raise InvalidKeyError(hash_data)
+
+ return hash_data.encode("utf-8")
+
+
+class InvalidKeyError(Exception):
+ """Raises an error when given key is of invalid datatype."""
+
+ def __init__(self, hash_data):
+ self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+ self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+ self.suffix = "\nKeys should be either str, int or bytes type"
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class DuplicatedKeysError(Exception):
+ """Raise an error when duplicate key found."""
+
+ def __init__(self, key, duplicate_key_indices, fix_msg=""):
+ self.key = key
+ self.duplicate_key_indices = duplicate_key_indices
+ self.fix_msg = fix_msg
+ self.prefix = "Found multiple examples generated with the same key"
+ if len(duplicate_key_indices) <= 20:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+ else:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
+ self.suffix = "\n" + fix_msg if fix_msg else ""
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class KeyHasher:
+ """KeyHasher class for providing hash using md5"""
+
+ def __init__(self, hash_salt: str):
+ self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
+
+ def hash(self, key: Union[str, int, bytes]) -> int:
+ """Returns 128-bits unique hash of input key
+
+ Args:
+ key: the input key to be hashed (should be str, int or bytes)
+
+ Returns: 128-bit int hash key"""
+ md5 = self._split_md5.copy()
+ byte_key = _as_bytes(key)
+ md5.update(byte_key)
+ # Convert to integer with hexadecimal conversion
+ return int(md5.hexdigest(), 16)
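+
+
+# Illustrative usage sketch (not part of the original module): salt the hasher with
+# the split name, then hash example keys; equal keys always map to the same 128-bit int.
+#
+# hasher = KeyHasher(hash_salt="train")
+# key_hash = hasher.hash("example-0")  # deterministic 128-bit integer
+# assert key_hash == KeyHasher("train").hash("example-0")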
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/load.py b/llmeval-env/lib/python3.10/site-packages/datasets/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd7aa401094b57a2cbe433567fe64e36ad775e07
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/load.py
@@ -0,0 +1,2699 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Access datasets."""
+
+import filecmp
+import glob
+import importlib
+import inspect
+import json
+import os
+import posixpath
+import shutil
+import signal
+import time
+import warnings
+from collections import Counter
+from contextlib import nullcontext
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
+
+import fsspec
+import requests
+import yaml
+from fsspec.core import url_to_fs
+from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFileSystem
+
+from . import config
+from .arrow_dataset import Dataset
+from .builder import BuilderConfig, DatasetBuilder
+from .data_files import (
+ DEFAULT_PATTERNS_ALL,
+ DataFilesDict,
+ DataFilesList,
+ DataFilesPatternsDict,
+ DataFilesPatternsList,
+ EmptyDatasetError,
+ get_data_patterns,
+ get_metadata_patterns,
+ sanitize_patterns,
+)
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin
+from .exceptions import DataFilesNotFoundError, DatasetNotFoundError
+from .features import Features
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict
+from .iterable_dataset import IterableDataset
+from .metric import Metric
+from .naming import camelcase_to_snakecase, snakecase_to_camelcase
+from .packaged_modules import (
+ _EXTENSION_TO_MODULE,
+ _MODULE_SUPPORTS_METADATA,
+ _MODULE_TO_EXTENSIONS,
+ _PACKAGED_DATASETS_MODULES,
+ _hash_python_lines,
+)
+from .splits import Split
+from .utils import _dataset_viewer
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import (
+ OfflineModeIsEnabled,
+ _raise_if_offline_mode_is_enabled,
+ cached_path,
+ head_hf_s3,
+ hf_github_url,
+ init_hf_modules,
+ is_relative_path,
+ relative_to_absolute_path,
+ url_or_path_join,
+)
+from .utils.hub import hf_dataset_url
+from .utils.info_utils import VerificationMode, is_small_dataset
+from .utils.logging import get_logger
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import get_imports, lock_importable_file
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + [".zip"]
+
+
+def _raise_timeout_error(signum, frame):
+ raise ValueError(
+ "Loading this dataset requires you to execute custom code contained in the dataset repository on your local "
+ "machine. Please set the option `trust_remote_code=True` to permit loading of this dataset."
+ )
+
+
+def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool:
+ """
+ Copied and adapted from Transformers
+ https://github.com/huggingface/transformers/blob/2098d343cc4b4b9d2aea84b3cf1eb5a1e610deff/src/transformers/dynamic_module_utils.py#L589
+ """
+ trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE
+ if trust_remote_code is None:
+ if config.TIME_OUT_REMOTE_CODE > 0:
+ try:
+ signal.signal(signal.SIGALRM, _raise_timeout_error)
+ signal.alarm(config.TIME_OUT_REMOTE_CODE)
+ while trust_remote_code is None:
+ answer = input(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
+ f"Do you wish to run the custom code? [y/N] "
+ )
+ if answer.lower() in ["yes", "y", "1"]:
+ trust_remote_code = True
+ elif answer.lower() in ["no", "n", "0", ""]:
+ trust_remote_code = False
+ signal.alarm(0)
+ except Exception:
+ # OS which does not support signal.SIGALRM
+ raise ValueError(
+ f"The repository for {repo_id} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\n"
+ f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
+ )
+ else:
+ # For the CI which might put the timeout at 0
+ _raise_timeout_error(None, None)
+ return trust_remote_code
+
+
+def init_dynamic_modules(
+ name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
+):
+ """
+ Create a module with name `name` in which you can add dynamic modules
+ such as metrics or datasets. The module can be imported using its name.
+ The module is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules) but it can
+ be overridden by specifying a path to another directory in `hf_modules_cache`.
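+
+ Example (illustrative sketch; uses the default modules cache location):
+
+ ```py
+ >>> dynamic_modules_path = init_dynamic_modules()
+ ```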
+ """
+ hf_modules_cache = init_hf_modules(hf_modules_cache)
+ dynamic_modules_path = os.path.join(hf_modules_cache, name)
+ os.makedirs(dynamic_modules_path, exist_ok=True)
+ if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
+ with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
+ pass
+ return dynamic_modules_path
+
+
+def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
+ """Import a module at module_path and return its main class:
+ - a DatasetBuilder if dataset is True
+ - a Metric if dataset is False
+ """
+ module = importlib.import_module(module_path)
+
+ if dataset:
+ main_cls_type = DatasetBuilder
+ else:
+ main_cls_type = Metric
+
+ # Find the main class in our imported module
+ module_main_cls = None
+ for name, obj in module.__dict__.items():
+ if inspect.isclass(obj) and issubclass(obj, main_cls_type):
+ if inspect.isabstract(obj):
+ continue
+ module_main_cls = obj
+ obj_module = inspect.getmodule(obj)
+ if obj_module is not None and module == obj_module:
+ break
+
+ return module_main_cls
+
+
+class _InitializeConfiguredDatasetBuilder:
+ """
+ From https://stackoverflow.com/questions/4647566/pickle-a-dynamically-parameterized-sub-class
+ See also ConfiguredDatasetBuilder.__reduce__
+ When called with the param value as the only argument, returns an
+ un-initialized instance of the parameterized class. Subsequent __setstate__
+ will be called by pickle.
+ """
+
+ def __call__(self, builder_cls, metadata_configs, default_config_name, name):
+ # make a simple object which has no complex __init__ (this one will do)
+ obj = _InitializeConfiguredDatasetBuilder()
+ obj.__class__ = configure_builder_class(
+ builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name
+ )
+ return obj
+
+
+def configure_builder_class(
+ builder_cls: Type[DatasetBuilder],
+ builder_configs: List[BuilderConfig],
+ default_config_name: Optional[str],
+ dataset_name: str,
+) -> Type[DatasetBuilder]:
+ """
+ Dynamically create a builder class with custom builder configs parsed from README.md file,
+ i.e. set BUILDER_CONFIGS class variable of a builder class to custom configs list.
+ """
+
+ class ConfiguredDatasetBuilder(builder_cls):
+ BUILDER_CONFIGS = builder_configs
+ DEFAULT_CONFIG_NAME = default_config_name
+
+ __module__ = builder_cls.__module__ # so that the actual packaged builder can be imported
+
+ def __reduce__(self): # to make the dynamically created class picklable, see _InitializeConfiguredDatasetBuilder
+ parent_builder_cls = self.__class__.__mro__[1]
+ return (
+ _InitializeConfiguredDatasetBuilder(),
+ (
+ parent_builder_cls,
+ self.BUILDER_CONFIGS,
+ self.DEFAULT_CONFIG_NAME,
+ self.dataset_name,
+ ),
+ self.__dict__.copy(),
+ )
+
+ ConfiguredDatasetBuilder.__name__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+ ConfiguredDatasetBuilder.__qualname__ = (
+ f"{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}"
+ )
+
+ return ConfiguredDatasetBuilder
+
+
+def get_dataset_builder_class(
+ dataset_module: "DatasetModule", dataset_name: Optional[str] = None
+) -> Type[DatasetBuilder]:
+ with lock_importable_file(
+ dataset_module.importable_file_path
+ ) if dataset_module.importable_file_path else nullcontext():
+ builder_cls = import_main_class(dataset_module.module_path)
+ if dataset_module.builder_configs_parameters.builder_configs:
+ dataset_name = dataset_name or dataset_module.builder_kwargs.get("dataset_name")
+ if dataset_name is None:
+ raise ValueError("dataset_name should be specified but got None")
+ builder_cls = configure_builder_class(
+ builder_cls,
+ builder_configs=dataset_module.builder_configs_parameters.builder_configs,
+ default_config_name=dataset_module.builder_configs_parameters.default_config_name,
+ dataset_name=dataset_name,
+ )
+ return builder_cls
+
+
+def files_to_hash(file_paths: List[str]) -> str:
+ """
+ Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
+ """
+ # List all python files in directories if directories are supplied as part of external imports
+ to_use_files: List[Union[Path, str]] = []
+ for file_path in file_paths:
+ if os.path.isdir(file_path):
+ to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
+ else:
+ to_use_files.append(file_path)
+
+ # Get the code from all these files
+ lines = []
+ for file_path in to_use_files:
+ with open(file_path, encoding="utf-8") as f:
+ lines.extend(f.readlines())
+ return _hash_python_lines(lines)
+
+
+def increase_load_count(name: str, resource_type: str):
+ """Update the download count of a dataset or metric."""
+ if not config.HF_DATASETS_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS:
+ try:
+ head_hf_s3(name, filename=name + ".py", dataset=(resource_type == "dataset"))
+ except Exception:
+ pass
+
+
+def _download_additional_modules(
+ name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]
+) -> List[Tuple[str, str]]:
+ """
+ Download the additional modules needed by a module <name>.py at a URL (or local path) <base_path>/<name>.py
+ The imports must have been parsed first using ``get_imports``.
+
+ If some modules need to be installed with pip, an error is raised showing how to install them.
+ This function returns the list of downloaded modules as tuples (import_name, module_file_path).
+
+ The downloaded modules can then be moved into an importable directory with ``_copy_script_and_other_resources_in_importable_dir``.
+ """
+ local_imports = []
+ library_imports = []
+ download_config = download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading extra modules"
+ for import_type, import_name, import_path, sub_directory in imports:
+ if import_type == "library":
+ library_imports.append((import_name, import_path)) # Import from a library
+ continue
+
+ if import_name == name:
+ raise ValueError(
+ f"Error in the {name} script, importing relative {import_name} module "
+ f"but {import_name} is the name of the script. "
+ f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
+ f"comment pointing to the original relative import file path."
+ )
+ if import_type == "internal":
+ url_or_filename = url_or_path_join(base_path, import_path + ".py")
+ elif import_type == "external":
+ url_or_filename = import_path
+ else:
+ raise ValueError("Wrong import_type")
+
+ local_import_path = cached_path(
+ url_or_filename,
+ download_config=download_config,
+ )
+ if sub_directory is not None:
+ local_import_path = os.path.join(local_import_path, sub_directory)
+ local_imports.append((import_name, local_import_path))
+
+ # Check library imports
+ needs_to_be_installed = {}
+ for library_import_name, library_import_path in library_imports:
+ try:
+ lib = importlib.import_module(library_import_name) # noqa F841
+ except ImportError:
+ if library_import_name not in needs_to_be_installed or library_import_path != library_import_name:
+ needs_to_be_installed[library_import_name] = library_import_path
+ if needs_to_be_installed:
+ _dependencies_str = "dependencies" if len(needs_to_be_installed) > 1 else "dependency"
+ _them_str = "them" if len(needs_to_be_installed) > 1 else "it"
+ if "sklearn" in needs_to_be_installed.keys():
+ needs_to_be_installed["sklearn"] = "scikit-learn"
+ if "Bio" in needs_to_be_installed.keys():
+ needs_to_be_installed["Bio"] = "biopython"
+ raise ImportError(
+ f"To be able to use {name}, you need to install the following {_dependencies_str}: "
+ f"{', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install "
+ f"{' '.join(needs_to_be_installed.values())}' for instance."
+ )
+ return local_imports
+
+
+def _copy_script_and_other_resources_in_importable_dir(
+ name: str,
+ importable_directory_path: str,
+ subdirectory_name: str,
+ original_local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ download_mode: Optional[Union[DownloadMode, str]],
+) -> str:
+ """Copy a script and its required imports to an importable directory
+
+ Args:
+ name (str): name of the resource to load
+ importable_directory_path (str): path to the loadable folder in the dynamic modules directory
+ subdirectory_name (str): name of the subdirectory in importable_directory_path in which to place the script
+ original_local_path (str): local path to the resource script
+ local_imports (List[Tuple[str, str]]): list of (destination_filename, import_file_to_copy)
+ additional_files (List[Tuple[str, str]]): list of (destination_filename, additional_file_to_copy)
+ download_mode (Optional[Union[DownloadMode, str]]): download mode
+
+ Return:
+ importable_file: path to an importable module with importlib.import_module
+ """
+ # Define a directory with a unique name in our dataset or metric folder
+ # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
+ importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name)
+ importable_file = os.path.join(importable_subdirectory, name + ".py")
+ # Prevent parallel disk operations
+ with lock_importable_file(importable_file):
+ # Create main dataset/metrics folder if needed
+ if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path):
+ shutil.rmtree(importable_directory_path)
+ os.makedirs(importable_directory_path, exist_ok=True)
+
+ # add an __init__ file to the main dataset folder if needed
+ init_file_path = os.path.join(importable_directory_path, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Create hash dataset folder if needed
+ os.makedirs(importable_subdirectory, exist_ok=True)
+ # add an __init__ file to the hash dataset folder if needed
+ init_file_path = os.path.join(importable_subdirectory, "__init__.py")
+ if not os.path.exists(init_file_path):
+ with open(init_file_path, "w"):
+ pass
+
+ # Copy dataset.py file in hash folder if needed
+ if not os.path.exists(importable_file):
+ shutil.copyfile(original_local_path, importable_file)
+ # Record metadata associating original dataset path with local unique folder
+ # Use os.path.splitext to split extension from importable_local_file
+ meta_path = os.path.splitext(importable_file)[0] + ".json"
+ if not os.path.exists(meta_path):
+ meta = {"original file path": original_local_path, "local file path": importable_file}
+ # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ # Copy all the additional imports
+ for import_name, import_path in local_imports:
+ if os.path.isfile(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name + ".py")
+ if not os.path.exists(full_path_local_import):
+ shutil.copyfile(import_path, full_path_local_import)
+ elif os.path.isdir(import_path):
+ full_path_local_import = os.path.join(importable_subdirectory, import_name)
+ if not os.path.exists(full_path_local_import):
+ shutil.copytree(import_path, full_path_local_import)
+ else:
+ raise ImportError(f"Error with local import at {import_path}")
+
+ # Copy additional files like dataset_infos.json file if needed
+ for file_name, original_path in additional_files:
+ destination_additional_path = os.path.join(importable_subdirectory, file_name)
+ if not os.path.exists(destination_additional_path) or not filecmp.cmp(
+ original_path, destination_additional_path
+ ):
+ shutil.copyfile(original_path, destination_additional_path)
+ return importable_file
+
+
+def _get_importable_file_path(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> str:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ return os.path.join(importable_directory_path, subdirectory_name, name.split("/")[-1] + ".py")
+
+
+def _create_importable_file(
+ local_path: str,
+ local_imports: List[Tuple[str, str]],
+ additional_files: List[Tuple[str, str]],
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+ download_mode: DownloadMode,
+) -> None:
+ importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace("/", "--"))
+ Path(importable_directory_path).mkdir(parents=True, exist_ok=True)
+ (Path(importable_directory_path).parent / "__init__.py").touch(exist_ok=True)
+ importable_local_file = _copy_script_and_other_resources_in_importable_dir(
+ name=name.split("/")[-1],
+ importable_directory_path=importable_directory_path,
+ subdirectory_name=subdirectory_name,
+ original_local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ download_mode=download_mode,
+ )
+ logger.debug(f"Created importable dataset file at {importable_local_file}")
+
+
+def _load_importable_file(
+ dynamic_modules_path: str,
+ module_namespace: str,
+ subdirectory_name: str,
+ name: str,
+) -> Tuple[str, str]:
+ module_path = ".".join(
+ [
+ os.path.basename(dynamic_modules_path),
+ module_namespace,
+ name.replace("/", "--"),
+ subdirectory_name,
+ name.split("/")[-1],
+ ]
+ )
+ return module_path, subdirectory_name
+
+
+def infer_module_for_data_files_list(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of data files.
+
+ It picks the module based on the most common file extension.
+ In case of a draw, ".parquet" is preferred, then alphabetical order.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+ download_config (`DownloadConfig`, optional): mainly used to pass use_auth_token or storage_options to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
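+
+ Example (illustrative sketch; a plain list of hypothetical CSV paths stands in for a `DataFilesList`, since the function only slices and iterates over it):
+
+ ```py
+ >>> infer_module_for_data_files_list(["data/train-0.csv", "data/train-1.csv"])
+ ('csv', {})
+ ```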
+ """
+ extensions_counter = Counter(
+ ("." + suffix.lower(), xbasename(filepath) in ("metadata.jsonl", "metadata.csv"))
+ for filepath in data_files_list[: config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]
+ for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+
+ def sort_key(ext_count: Tuple[Tuple[str, bool], int]) -> Tuple[int, bool]:
+ """Sort by count and set ".parquet" as the favorite in case of a draw, and ignore metadata files"""
+ (ext, is_metadata), count = ext_count
+ return (not is_metadata, count, ext == ".parquet", ext)
+
+ for (ext, _), _ in sorted(extensions_counter.items(), key=sort_key, reverse=True):
+ if ext in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[ext]
+ elif ext == ".zip":
+ return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config)
+ return None, {}
+
+
+def infer_module_for_data_files_list_in_archives(
+ data_files_list: DataFilesList, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], dict]:
+ """Infer module (and builder kwargs) from list of archive data files.
+
+ Args:
+ data_files_list (DataFilesList): List of data files.
+ download_config (`DownloadConfig`, optional): mainly used to pass use_auth_token or storage_options to support different platforms and auth types.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - dict of builder kwargs
+ """
+ archived_files = []
+ archive_files_counter = 0
+ for filepath in data_files_list:
+ if str(filepath).endswith(".zip"):
+ archive_files_counter += 1
+ if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE:
+ break
+ extracted = xjoin(StreamingDownloadManager().extract(filepath), "**")
+ archived_files += [
+ f.split("::")[0]
+ for f in xglob(extracted, recursive=True, download_config=download_config)[
+ : config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE
+ ]
+ ]
+ extensions_counter = Counter(
+ "." + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split(".")[1:]
+ )
+ if extensions_counter:
+ most_common = extensions_counter.most_common(1)[0][0]
+ if most_common in _EXTENSION_TO_MODULE:
+ return _EXTENSION_TO_MODULE[most_common]
+ return None, {}
+
+
+def infer_module_for_data_files(
+ data_files: DataFilesDict, path: Optional[str] = None, download_config: Optional[DownloadConfig] = None
+) -> Tuple[Optional[str], Dict[str, Any]]:
+ """Infer module (and builder kwargs) from data files. Raise if module names for different splits don't match.
+
+ Args:
+ data_files ([`DataFilesDict`]): Dict of list of data files.
+ path (str, *optional*): Dataset name or path.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters to authenticate on the Hugging Face Hub for private remote files.
+
+ Returns:
+ tuple[str, dict[str, Any]]: Tuple with
+ - inferred module name
+ - builder kwargs
+ """
+ split_modules = {
+ split: infer_module_for_data_files_list(data_files_list, download_config=download_config)
+ for split, data_files_list in data_files.items()
+ }
+ module_name, default_builder_kwargs = next(iter(split_modules.values()))
+ if any((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values()):
+ raise ValueError(f"Couldn't infer the same data file format for all splits. Got {split_modules}")
+ if not module_name:
+ raise DataFilesNotFoundError("No (supported) data files found" + (f" in {path}" if path else ""))
+ return module_name, default_builder_kwargs
+
+
+def create_builder_configs_from_metadata_configs(
+ module_path: str,
+ metadata_configs: MetadataConfigs,
+ supports_metadata: bool,
+ base_path: Optional[str] = None,
+ default_builder_kwargs: Dict[str, Any] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[List[BuilderConfig], str]:
+ builder_cls = import_main_class(module_path)
+ builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS
+ default_config_name = metadata_configs.get_default_config_name()
+ builder_configs = []
+ default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs
+
+ base_path = base_path if base_path is not None else ""
+ for config_name, config_params in metadata_configs.items():
+ config_data_files = config_params.get("data_files")
+ config_data_dir = config_params.get("data_dir")
+ config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path
+ try:
+ config_patterns = (
+ sanitize_patterns(config_data_files)
+ if config_data_files is not None
+ else get_data_patterns(config_base_path, download_config=download_config)
+ )
+ config_data_files_dict = DataFilesPatternsDict.from_patterns(
+ config_patterns,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ except EmptyDatasetError as e:
+ raise EmptyDatasetError(
+ f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}',"
+ f" check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. "
+ ) from e
+ if config_data_files is None and supports_metadata and config_patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ config_metadata_patterns = get_metadata_patterns(base_path, download_config=download_config)
+ except FileNotFoundError:
+ config_metadata_patterns = None
+ if config_metadata_patterns is not None:
+ config_metadata_data_files_list = DataFilesPatternsList.from_patterns(config_metadata_patterns)
+ config_data_files_dict = DataFilesPatternsDict(
+ {
+ split: data_files_list + config_metadata_data_files_list
+ for split, data_files_list in config_data_files_dict.items()
+ }
+ )
+ ignored_params = [
+ param for param in config_params if not hasattr(builder_config_cls, param) and param != "default"
+ ]
+ if ignored_params:
+ logger.warning(
+ f"Some datasets params were ignored: {ignored_params}. "
+ "Make sure to use only valid params for the dataset builder and to have "
+ "a up-to-date version of the `datasets` library."
+ )
+ builder_configs.append(
+ builder_config_cls(
+ name=config_name,
+ data_files=config_data_files_dict,
+ data_dir=config_data_dir,
+ **{
+ param: value
+ for param, value in {**default_builder_kwargs, **config_params}.items()
+ if hasattr(builder_config_cls, param) and param not in ("default", "data_files", "data_dir")
+ },
+ )
+ )
+ return builder_configs, default_config_name
+
+
+@dataclass
+class BuilderConfigsParameters:
+ """Dataclass containing objects related to creation of builder configurations from yaml's metadata content.
+
+ Attributes:
+ metadata_configs (`MetadataConfigs`, *optional*):
+ Configs parsed from yaml's metadata.
+ builder_configs (`list[BuilderConfig]`, *optional*):
+ List of BuilderConfig objects created from metadata_configs above.
+ default_config_name (`str`):
+ Name of default config taken from yaml's metadata.
+ """
+
+ metadata_configs: Optional[MetadataConfigs] = None
+ builder_configs: Optional[List[BuilderConfig]] = None
+ default_config_name: Optional[str] = None
+
+
+@dataclass
+class DatasetModule:
+ module_path: str
+ hash: str
+ builder_kwargs: dict
+ builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters)
+ dataset_infos: Optional[DatasetInfosDict] = None
+ importable_file_path: Optional[str] = None
+
+
+@dataclass
+class MetricModule:
+ module_path: str
+ hash: str
+
+
+class _DatasetModuleFactory:
+ def get_module(self) -> DatasetModule:
+ raise NotImplementedError
+
+
+class _MetricModuleFactory:
+ def get_module(self) -> MetricModule:
+ raise NotImplementedError
+
+
+class GithubMetricModuleFactory(_MetricModuleFactory):
+ """Get the module of a metric. The metric script is downloaded from GitHub.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config.copy() if download_config else DownloadConfig()
+ if self.download_config.max_retries < 3:
+ self.download_config.max_retries = 3
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ assert self.name.count("/") == 0
+ increase_load_count(name, resource_type="metric")
+
+ def download_loading_script(self, revision: Optional[str]) -> str:
+ file_path = hf_github_url(path=self.name, name=self.name + ".py", revision=revision, dataset=False)
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ _loading_script_url = hf_github_url(
+ path=self.name, name=self.name + ".py", revision=self.revision, dataset=False
+ )
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {_loading_script_url}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ revision = self.revision
+ try:
+ local_path = self.download_loading_script(revision)
+ revision = self.revision
+ except FileNotFoundError:
+ if revision is not None:
+ raise
+ else:
+ revision = "main"
+ local_path = self.download_loading_script(revision)
+ logger.warning(
+ f"Couldn't find a directory or a metric named '{self.name}' in this version. "
+ f"It was picked from the main branch on github instead."
+ )
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_github_url(path=self.name, name="", revision=revision, dataset=False),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make the new module to be noticed by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalMetricModuleFactory(_MetricModuleFactory):
+ """Get the module of a local metric. The metric script is loaded from a local script.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> MetricModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the metric. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this metric from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=[],
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="metrics",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make the new module to be noticed by the import system
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """Get the module of a local dataset. The dataset script is loaded from a local script."""
+
+ def __init__(
+ self,
+ path: str,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.path = path
+ self.name = Path(path).stem
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at {self.path}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME
+ dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME
+ imports = get_imports(self.path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=str(Path(self.path).parent),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path.is_file():
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path)))
+ if dataset_readme_path.is_file():
+ additional_files.append((config.REPOCARD_FILENAME, str(dataset_readme_path)))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([self.path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=self.path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+
+ # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {"base_path": str(Path(self.path).parent)}
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+
+
+class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+ """Get the module of a dataset loaded from the user's data files. The dataset builder module to use is inferred
+ from the data files extensions."""
+
+ def __init__(
+ self,
+ path: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ if data_dir and os.path.isabs(data_dir):
+ raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}")
+
+ self.path = Path(path).as_posix()
+ self.name = Path(path).stem
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_mode = download_mode
+
+ def get_module(self) -> DatasetModule:
+ readme_path = os.path.join(self.path, config.REPOCARD_FILENAME)
+ standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME)
+ dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData()
+ if os.path.exists(standalone_yaml_path):
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ # we need a set of data files to find which dataset builder to use,
+ # because the module name is inferred from the data file extensions
+ base_path = Path(self.path, self.data_dir or "").expanduser().resolve().as_posix()
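+ # pattern resolution order: explicit `data_files`, then the first metadata config's `data_files`, then the default data patterns of the directory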
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.path,
+ )
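+ # keep only the data files whose extensions match the inferred module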
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path)
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": self.path,
+ "dataset_name": camelcase_to_snakecase(Path(self.path).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)):
+ with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = Hasher.hash({"dataset_infos": dataset_infos, "builder_configs": builder_configs})
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class PackagedDatasetModuleFactory(_DatasetModuleFactory):
+ """Get the dataset builder module from the ones that are packaged with the library: csv, json, etc."""
+
+ def __init__(
+ self,
+ name: str,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ base_path = Path(self.data_dir or "").expanduser().resolve().as_posix()
+ patterns = (
+ sanitize_patterns(self.data_files)
+ if self.data_files is not None
+ else get_data_patterns(base_path, download_config=self.download_config)
+ )
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ download_config=self.download_config,
+ base_path=base_path,
+ )
+ supports_metadata = self.name in _MODULE_SUPPORTS_METADATA
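+ # metadata files are only collected when no explicit `data_files` were passed, the module supports metadata, and the patterns are not the catch-all defaults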
+ if self.data_files is None and supports_metadata and patterns != DEFAULT_PATTERNS_ALL:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
+
+ builder_kwargs = {
+ "data_files": data_files,
+ "dataset_name": self.name,
+ }
+
+ return DatasetModule(module_path, hash, builder_kwargs)
+
+
+class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from data files of a dataset repository.
+ The dataset builder module to use is inferred from the data files extensions.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, List, Dict]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.data_files = data_files
+ self.data_dir = data_dir
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision=self.revision,
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ # even if metadata_configs is not None (which means that we will resolve files for each config later),
+ # we cannot skip resolving all files because the module name is inferred from the data file extensions
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip("/")
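+ # the hf:// base path is pinned to the resolved commit sha, so data file resolution stays consistent with the revision above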
+
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ dataset_readme_path = cached_path(
+ hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data
+ except FileNotFoundError:
+ dataset_card_data = DatasetCardData()
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading standalone yaml"
+ try:
+ standalone_yaml_path = cached_path(
+ hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(standalone_yaml_path, "r", encoding="utf-8") as f:
+ standalone_yaml_data = yaml.safe_load(f.read())
+ if standalone_yaml_data:
+ _dataset_card_data_dict = dataset_card_data.to_dict()
+ _dataset_card_data_dict.update(standalone_yaml_data)
+ dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
+ except FileNotFoundError:
+ pass
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if config.USE_PARQUET_EXPORT: # use the infos from the parquet export as a fallback (infos from the dataset card take precedence)
+ try:
+ exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ exported_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
+ for config_name in exported_dataset_infos
+ }
+ )
+ except _dataset_viewer.DatasetViewerError:
+ exported_dataset_infos = None
+ else:
+ exported_dataset_infos = None
+ if exported_dataset_infos:
+ exported_dataset_infos.update(dataset_infos)
+ dataset_infos = exported_dataset_infos
+ # we need a set of data files to find which dataset builder to use,
+ # because the module name is inferred from the data file extensions
+ if self.data_files is not None:
+ patterns = sanitize_patterns(self.data_files)
+ elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
+ patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
+ else:
+ patterns = get_data_patterns(base_path, download_config=self.download_config)
+ data_files = DataFilesDict.from_patterns(
+ patterns,
+ base_path=base_path,
+ allowed_extensions=ALL_ALLOWED_EXTENSIONS,
+ download_config=self.download_config,
+ )
+ module_name, default_builder_kwargs = infer_module_for_data_files(
+ data_files=data_files,
+ path=self.name,
+ download_config=self.download_config,
+ )
+ data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
+ # Collect metadata files if the module supports them
+ supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
+ if self.data_files is None and supports_metadata:
+ try:
+ metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
+ except FileNotFoundError:
+ metadata_patterns = None
+ if metadata_patterns is not None:
+ metadata_data_files_list = DataFilesList.from_patterns(
+ metadata_patterns, download_config=self.download_config, base_path=base_path
+ )
+ if metadata_data_files_list:
+ data_files = DataFilesDict(
+ {
+ split: data_files_list + metadata_data_files_list
+ for split, data_files_list in data_files.items()
+ }
+ )
+
+ module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
+ if metadata_configs:
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ base_path=base_path,
+ supports_metadata=supports_metadata,
+ default_builder_kwargs=default_builder_kwargs,
+ download_config=self.download_config,
+ )
+ else:
+ builder_configs: List[BuilderConfig] = [
+ import_main_class(module_path).BUILDER_CONFIG_CLASS(
+ data_files=data_files,
+ **default_builder_kwargs,
+ )
+ ]
+ default_config_name = None
+ builder_kwargs = {
+ "base_path": hf_dataset_url(self.name, "", revision=revision).rstrip("/"),
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+ if self.data_dir:
+ builder_kwargs["data_files"] = data_files
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ # this file is deprecated and was created automatically in old versions of push_to_hub
+ dataset_infos_path = cached_path(
+ hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=revision),
+ download_config=download_config,
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ legacy_dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ if len(legacy_dataset_infos) == 1:
+ # old config e.g. named "username--dataset_name"
+ legacy_config_name = next(iter(legacy_dataset_infos))
+ legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
+ legacy_dataset_infos.update(dataset_infos)
+ dataset_infos = legacy_dataset_infos
+ except FileNotFoundError:
+ pass
+ if default_config_name is None and len(dataset_infos) == 1:
+ default_config_name = next(iter(dataset_infos))
+
+ hash = revision
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
+ """
+ Get the module of a dataset loaded from parquet files of a dataset repository parquet export.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ increase_load_count(name, resource_type="dataset")
+
+ def get_module(self) -> DatasetModule:
+ exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
+ dataset=self.name, revision=self.revision, token=self.download_config.token
+ )
+ dataset_infos = DatasetInfosDict(
+ {
+ config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
+ for config_name in exported_dataset_infos
+ }
+ )
+ hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(
+ self.name,
+ revision="refs/convert/parquet",
+ token=self.download_config.token,
+ timeout=100.0,
+ )
+ revision = hfh_dataset_info.sha # fix the revision in case there are new commits in the meantime
+ metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
+ revision=revision, exported_parquet_files=exported_parquet_files, dataset_infos=dataset_infos
+ )
+ module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
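+ # the parquet export is always read with the packaged "parquet" builder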
+ builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
+ module_path,
+ metadata_configs,
+ supports_metadata=False,
+ download_config=self.download_config,
+ )
+ hash = self.revision
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": camelcase_to_snakecase(Path(self.name).name),
+ }
+
+ return DatasetModule(
+ module_path,
+ hash,
+ builder_kwargs,
+ dataset_infos=dataset_infos,
+ builder_configs_parameters=BuilderConfigsParameters(
+ metadata_configs=metadata_configs,
+ builder_configs=builder_configs,
+ default_config_name=default_config_name,
+ ),
+ )
+
+
+class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
+ """
+ Get the module of a dataset from a dataset repository.
+ The dataset script comes from the script inside the dataset repository.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ ):
+ self.name = name
+ self.revision = revision
+ self.download_config = download_config or DownloadConfig()
+ self.download_mode = download_mode
+ self.dynamic_modules_path = dynamic_modules_path
+ self.trust_remote_code = trust_remote_code
+ increase_load_count(name, resource_type="dataset")
+
+ def download_loading_script(self) -> str:
+ file_path = hf_dataset_url(self.name, self.name.split("/")[-1] + ".py", revision=self.revision)
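+ # the loading script is expected to be named after the repository, e.g. "username/my_dataset" -> "my_dataset.py"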
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading builder script"
+ return cached_path(file_path, download_config=download_config)
+
+ def download_dataset_infos_file(self) -> str:
+ dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision)
+ # Download the dataset infos file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading metadata"
+ try:
+ return cached_path(
+ dataset_infos,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def download_dataset_readme_file(self) -> str:
+ readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.revision)
+ # Download the dataset README file if available
+ download_config = self.download_config.copy()
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading readme"
+ try:
+ return cached_path(
+ readme_url,
+ download_config=download_config,
+ )
+ except (FileNotFoundError, ConnectionError):
+ return None
+
+ def get_module(self) -> DatasetModule:
+ if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
+ warnings.warn(
+ f"The repository for {self.name} contains custom code which must be executed to correctly "
+ f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
+ f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
+ f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
+ FutureWarning,
+ )
+ # get script and other files
+ local_path = self.download_loading_script()
+ dataset_infos_path = self.download_dataset_infos_file()
+ dataset_readme_path = self.download_dataset_readme_file()
+ imports = get_imports(local_path)
+ local_imports = _download_additional_modules(
+ name=self.name,
+ base_path=hf_dataset_url(self.name, "", revision=self.revision),
+ imports=imports,
+ download_config=self.download_config,
+ )
+ additional_files = []
+ if dataset_infos_path:
+ additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
+ if dataset_readme_path:
+ additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
+ # copy the script and the files in an importable directory
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ if not os.path.exists(importable_file_path):
+ trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
+ if trust_remote_code:
+ _create_importable_file(
+ local_path=local_path,
+ local_imports=local_imports,
+ additional_files=additional_files,
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ download_mode=self.download_mode,
+ )
+ else:
+ raise ValueError(
+ f"Loading {self.name} requires you to execute the dataset script in that"
+ " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
+ " set the option `trust_remote_code=True` to remove this error."
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "base_path": hf_dataset_url(self.name, "", revision=self.revision).rstrip("/"),
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+
+
+class CachedDatasetModuleFactory(_DatasetModuleFactory):
+ """
+ Get the module of a dataset that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ cache_dir: Optional[str] = None,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.cache_dir = cache_dir
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") <= 1
+
+ def get_module(self) -> DatasetModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if hashes:
+ # get most recent
+ def _get_modification_time(module_hash):
+ return (
+ (Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
+ .stat()
+ .st_mtime
+ )
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ warning_msg = (
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}"
+ )
+ if not config.HF_DATASETS_OFFLINE:
+ warning_msg += ", or remotely on the Hugging Face Hub."
+ logger.warning(warning_msg)
+ importable_file_path = _get_importable_file_path(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ module_path, hash = _load_importable_file(
+ dynamic_modules_path=dynamic_modules_path,
+ module_namespace="datasets",
+ subdirectory_name=hash,
+ name=self.name,
+ )
+ # make sure the new module is noticed by the import system
+ importlib.invalidate_caches()
+ builder_kwargs = {
+ "repo_id": self.name,
+ }
+ return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
+ cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
+ namespace_and_dataset_name = self.name.split("/")
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
+ cached_relative_path = "___".join(namespace_and_dataset_name)
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
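+ # cached datasets typically follow a <config_name>/<version>/<hash> layout, hence the three-level glob below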
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
+ if os.path.isdir(cached_directory_path)
+ ]
+ if cached_directory_paths:
+ builder_kwargs = {
+ "repo_id": self.name,
+ "dataset_name": self.name.split("/")[-1],
+ }
+ warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
+ if config.HF_DATASETS_OFFLINE:
+ warning_msg += " (offline mode is enabled)."
+ logger.warning(warning_msg)
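+ # fall back to the generic "cache" packaged builder, which reloads the most recently cached Arrow data for this dataset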
+ return DatasetModule(
+ "datasets.packaged_modules.cache.cache",
+ "auto",
+ {**builder_kwargs, "version": "auto"},
+ )
+ raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}")
+
+
+class CachedMetricModuleFactory(_MetricModuleFactory):
+ """
+ Get the module of a metric that has been loaded once already and cached.
+ The script that is loaded from the cache is the most recent one with a matching name.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ name: str,
+ dynamic_modules_path: Optional[str] = None,
+ ):
+ self.name = name
+ self.dynamic_modules_path = dynamic_modules_path
+ assert self.name.count("/") == 0
+
+ def get_module(self) -> MetricModule:
+ dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
+ importable_directory_path = os.path.join(dynamic_modules_path, "metrics", self.name)
+ hashes = (
+ [h for h in os.listdir(importable_directory_path) if len(h) == 64]
+ if os.path.isdir(importable_directory_path)
+ else None
+ )
+ if not hashes:
+ raise FileNotFoundError(f"Metric {self.name} is not cached in {dynamic_modules_path}")
+ # get most recent
+
+ def _get_modification_time(module_hash):
+ return (Path(importable_directory_path) / module_hash / (self.name + ".py")).stat().st_mtime
+
+ hash = sorted(hashes, key=_get_modification_time)[-1]
+ logger.warning(
+ f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
+ f"couldn't be found locally at {self.name}, or remotely on the Hugging Face Hub."
+ )
+ # make sure the new module is noticed by the import system
+ module_path = ".".join([os.path.basename(dynamic_modules_path), "metrics", self.name, hash, self.name])
+ importlib.invalidate_caches()
+ return MetricModule(module_path, hash)
+
+
+def dataset_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str, DataFilesDict]] = None,
+ cache_dir: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ _require_custom_configs=False,
+ **download_kwargs,
+) -> DatasetModule:
+ """
+ Download/extract/cache a dataset module.
+
+ Dataset codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the dataset.
+ Depending on ``path``, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if ``path`` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
+ - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
+ -> load the dataset builder from the dataset script
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
+
+ For datasets on the Hugging Face Hub (list all available datasets with ``huggingface_hub.list_datasets()``)
+
+ - if ``path`` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. ``glue``, ``squad``, ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ data_dir (:obj:`str`, optional): Directory with the data files. Used only if `data_files` is not specified,
+ in which case it is equivalent to passing `os.path.join(data_dir, "**")` as `data_files`.
+ data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ DatasetModule
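+
+ Example:
+
+ Illustrative sketch; assumes local CSV files matching the pattern exist:
+
+ ```py
+ >>> from datasets.load import dataset_module_factory
+ >>> dataset_module = dataset_module_factory("csv", data_files={"train": "data/*.csv"})
+ >>> dataset_module.builder_kwargs["dataset_name"]
+ 'csv'
+ ```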
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+ download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
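+ # e.g. path "./dataset/squad" gives filename "squad.py" and combined_path "./dataset/squad/squad.py"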
+
+ # We have several ways to get a dataset builder:
+ #
+ # - if path is the name of a packaged dataset module
+ # -> use the packaged module (json, csv, etc.)
+ #
+ # - if os.path.join(path, name) is a local python file
+ # -> use the module from the python file
+ # - if path is a local directory (but no python file)
+ # -> use a packaged module (csv, text etc.) based on content of the directory
+ #
+ # - if path has one "/" and is dataset repository on the HF hub with a python file
+ # -> the module from the python file in the dataset repository
+ # - if path has one "/" and is dataset repository on the HF hub without a python file
+ # -> use a packaged module (csv, text etc.) based on content of the repository
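+ #
+ # For example (illustrative):
+ # "csv" -> PackagedDatasetModuleFactory
+ # "./path/to/data_dir" (data files only) -> LocalDatasetModuleFactoryWithoutScript
+ # "./dataset/squad/squad.py" -> LocalDatasetModuleFactoryWithScript
+ # "username/dataset_name" (data files only) -> HubDatasetModuleFactoryWithoutScript (or HubDatasetModuleFactoryWithParquetExport)
+ # "username/dataset_name" (with a script) -> HubDatasetModuleFactoryWithScript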
+
+ # Try packaged
+ if path in _PACKAGED_DATASETS_MODULES:
+ return PackagedDatasetModuleFactory(
+ path,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ # Try locally
+ elif path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalDatasetModuleFactoryWithScript(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalDatasetModuleFactoryWithScript(
+ combined_path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ elif os.path.isdir(path):
+ return LocalDatasetModuleFactoryWithoutScript(
+ path, data_dir=data_dir, data_files=data_files, download_mode=download_mode
+ ).get_module()
+ # Try remotely
+ elif is_relative_path(path) and path.count("/") <= 1:
+ try:
+ _raise_if_offline_mode_is_enabled()
+ hf_api = HfApi(config.HF_ENDPOINT)
+ try:
+ dataset_info = hf_api.dataset_info(
+ repo_id=path,
+ revision=revision,
+ token=download_config.token,
+ timeout=100.0,
+ )
+ except Exception as e: # noqa catch any exception of hf_hub and consider that the dataset doesn't exist
+ if isinstance(
+ e,
+ (
+ OfflineModeIsEnabled,
+ requests.exceptions.ConnectTimeout,
+ requests.exceptions.ConnectionError,
+ ),
+ ):
+ raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({type(e).__name__})")
+ elif "404" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ raise DatasetNotFoundError(msg + f" at revision '{revision}'" if revision else msg)
+ elif "401" in str(e):
+ msg = f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed"
+ msg = msg + f" at revision '{revision}'" if revision else msg
+ raise DatasetNotFoundError(
+ msg
+ + f". If the dataset is private or gated, make sure to log in with `huggingface-cli login` or visit the dataset page at https://huggingface.co/datasets/{path} to ask for access."
+ )
+ else:
+ raise e
+ if filename in [sibling.rfilename for sibling in dataset_info.siblings]: # contains a dataset script
+ fs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ if _require_custom_configs or (revision and revision != "main"):
+ can_load_config_from_parquet_export = False
+ elif _require_default_config_name:
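+ # the parquet export has no notion of a script-defined DEFAULT_CONFIG_NAME, so only use it if the script doesn't define one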
+ with fs.open(f"datasets/{path}/{filename}", "r", encoding="utf-8") as f:
+ can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
+ else:
+ can_load_config_from_parquet_export = True
+ if config.USE_PARQUET_EXPORT and can_load_config_from_parquet_export:
+ # If the parquet export is ready (parquet files + info available for the current sha), we can use it instead
+ # This fails when the dataset has multiple configs and a default config and
+ # the user didn't specify a configuration name (_require_default_config_name=True).
+ try:
+ return HubDatasetModuleFactoryWithParquetExport(
+ path, download_config=download_config, revision=dataset_info.sha
+ ).get_module()
+ except _dataset_viewer.DatasetViewerError:
+ pass
+ # Otherwise we must use the dataset script if the user trusts it
+ return HubDatasetModuleFactoryWithScript(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ return HubDatasetModuleFactoryWithoutScript(
+ path,
+ revision=revision,
+ data_dir=data_dir,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ ).get_module()
+ except Exception as e1:
+ # All the attempts failed; before raising the error, check whether the module is already cached
+ try:
+ return CachedDatasetModuleFactory(
+ path, dynamic_modules_path=dynamic_modules_path, cache_dir=cache_dir
+ ).get_module()
+ except Exception:
+ # If it's not in the cache, then it doesn't exist.
+ if isinstance(e1, OfflineModeIsEnabled):
+ raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None
+ if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)):
+ raise e1 from None
+ if isinstance(e1, FileNotFoundError):
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. "
+ f"Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}"
+ ) from None
+ raise e1 from None
+ else:
+ raise FileNotFoundError(
+ f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory."
+ )
+
+
+@deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+def metric_module_factory(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ trust_remote_code: Optional[bool] = None,
+ **download_kwargs,
+) -> MetricModule:
+ """
+ Download/extract/cache a metric module.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ Metrics codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks).
+
+ Args:
+
+ path (str): Path or name of the metric script.
+
+ - if ``path`` is a local metric script or a directory containing a local metric script (if the script has the same name as the directory):
+ -> load the module from the metric script
+ e.g. ``'./metrics/accuracy'`` or ``'./metrics/accuracy/accuracy.py'``.
+ - if ``path`` is a metric on the Hugging Face Hub (ex: `glue`, `squad`)
+ -> load the module from the metric script in the GitHub repository at huggingface/datasets
+ e.g. ``'accuracy'`` or ``'rouge'``.
+
+ revision (Optional ``Union[str, datasets.Version]``):
+ If specified, the module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
+ By default, the datasets and metrics are stored inside the `datasets_modules` module.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override
+ the attributes in download_config if supplied.
+
+ Returns:
+ MetricModule
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ download_config.extract_compressed_file = True
+ download_config.force_extract = True
+
+ filename = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
+ if not filename.endswith(".py"):
+ filename = filename + ".py"
+ combined_path = os.path.join(path, filename)
+ # Try locally
+ if path.endswith(filename):
+ if os.path.isfile(path):
+ return LocalMetricModuleFactory(
+ path,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(path)}")
+ elif os.path.isfile(combined_path):
+ return LocalMetricModuleFactory(
+ combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path
+ ).get_module()
+ elif is_relative_path(path) and path.count("/") == 0:
+ try:
+ return GithubMetricModuleFactory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ trust_remote_code=trust_remote_code,
+ ).get_module()
+ except Exception as e1: # noqa all the attempts failed, before raising the error we should check if the module is already cached.
+ try:
+ return CachedMetricModuleFactory(path, dynamic_modules_path=dynamic_modules_path).get_module()
+ except Exception: # noqa if it's not in the cache, then it doesn't exist.
+ if not isinstance(e1, FileNotFoundError):
+ raise e1 from None
+ raise FileNotFoundError(
+ f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}. "
+ f"Metric '{path}' doesn't exist on the Hugging Face Hub either."
+ ) from None
+ else:
+ raise FileNotFoundError(f"Couldn't find a metric script at {relative_to_absolute_path(combined_path)}.")
+
+
+@deprecated("Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate")
+def load_metric(
+ path: str,
+ config_name: Optional[str] = None,
+ process_id: int = 0,
+ num_process: int = 1,
+ cache_dir: Optional[str] = None,
+ experiment_id: Optional[str] = None,
+ keep_in_memory: bool = False,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ trust_remote_code: Optional[bool] = None,
+ **metric_init_kwargs,
+) -> Metric:
+ """Load a `datasets.Metric`.
+
+ Use `evaluate.load` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+ Args:
+
+ path (``str``):
+ path to the metric processing script with the metric builder. Can be either:
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
+ - a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
+ e.g. ``'rouge'`` or ``'bleu'``
+ config_name (:obj:`str`, optional): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
+ process_id (:obj:`int`, optional): for distributed evaluation: id of the process
+ num_process (:obj:`int`, optional): for distributed evaluation: total number of processes
+ cache_dir (Optional str): path to store the temporary predictions and references (defaults to `~/.cache/huggingface/metrics/`)
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+ download_mode (:class:`DownloadMode` or :obj:`str`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
+ revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
+ at this version. By default, it is set to the local version of the lib. Specifying a version that is different from
+ your local version of the lib might cause compatibility issues.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ Returns:
+ `datasets.Metric`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> accuracy = load_metric('accuracy')
+ >>> accuracy.compute(references=[1, 0], predictions=[1, 1])
+ {'accuracy': 0.5}
+ ```
+ """
+ with warnings.catch_warnings():
+ # Ignore equivalent warnings to the one already issued
+ warnings.filterwarnings("ignore", message=".*https://huggingface.co/docs/evaluate$", category=FutureWarning)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ metric_module = metric_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ trust_remote_code=trust_remote_code,
+ ).module_path
+ metric_cls = import_main_class(metric_module, dataset=False)
+ metric = metric_cls(
+ config_name=config_name,
+ process_id=process_id,
+ num_process=num_process,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ experiment_id=experiment_id,
+ **metric_init_kwargs,
+ )
+
+ # Download and prepare resources for the metric
+ metric.download_and_prepare(download_config=download_config)
+
+ return metric
+
+
+def load_dataset_builder(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: Optional[bool] = None,
+ _require_default_config_name=True,
+ **config_kwargs,
+) -> DatasetBuilder:
+ """Load a dataset builder from the Hugging Face Hub, or a local dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
+ without downloading the dataset itself.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.)
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of data format or structure.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equivalent to passing `os.path.join(data_dir, "**")` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features ([`Features`], *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`]
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`DatasetBuilder`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.info.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ if token is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.token = token
+ if storage_options is not None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ download_config.storage_options.update(storage_options)
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=_require_default_config_name,
+ _require_custom_configs=bool(config_kwargs),
+ )
+ # Get dataset builder class from the processing script
+ builder_kwargs = dataset_module.builder_kwargs
+ data_dir = builder_kwargs.pop("data_dir", data_dir)
+ data_files = builder_kwargs.pop("data_files", data_files)
+ config_name = builder_kwargs.pop(
+ "config_name", name or dataset_module.builder_configs_parameters.default_config_name
+ )
+ dataset_name = builder_kwargs.pop("dataset_name", None)
+ info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None
+
+ if (
+ path in _PACKAGED_DATASETS_MODULES
+ and data_files is None
+ and dataset_module.builder_configs_parameters.builder_configs[0].data_files is None
+ ):
+ error_msg = f"Please specify the data files or data directory to load for the {path} dataset builder."
+ example_extensions = [
+ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
+ ]
+ if example_extensions:
+ error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
+ raise ValueError(error_msg)
+
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name)
+ # Instantiate the dataset builder
+ builder_instance: DatasetBuilder = builder_cls(
+ cache_dir=cache_dir,
+ dataset_name=dataset_name,
+ config_name=config_name,
+ data_dir=data_dir,
+ data_files=data_files,
+ hash=dataset_module.hash,
+ info=info,
+ features=features,
+ token=token,
+ storage_options=storage_options,
+ **builder_kwargs,
+ **config_kwargs,
+ )
+ builder_instance._use_legacy_cache_dir_if_possible(dataset_module)
+
+ return builder_instance
+
+
+def load_dataset(
+ path: str,
+ name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ split: Optional[Union[str, Split]] = None,
+ cache_dir: Optional[str] = None,
+ features: Optional[Features] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ save_infos: bool = False,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ task="deprecated",
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[Dict] = None,
+ trust_remote_code: Optional[bool] = None,
+ **config_kwargs,
+) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
+ """Load a dataset from the Hugging Face Hub, or a local dataset.
+
+ You can find the list of datasets on the [Hub](https://huggingface.co/datasets) or with [`huggingface_hub.list_datasets`].
+
+ A dataset is a directory that contains:
+
+ - some data files in generic formats (JSON, CSV, Parquet, text, etc.).
+ - and optionally a dataset script, if it requires some code to read the data files. This is used to load any kind of formats or structures.
+
+ Note that dataset scripts can also download and read data files from anywhere - in case your data files already exist online.
+
+ This function does the following under the hood:
+
+ 1. Download the dataset script from `path` and import it in the library, if it's not already cached inside the library.
+
+ If the dataset has no dataset script, then a generic dataset script is imported instead (JSON, CSV, Parquet, text, etc.)
+
+ Dataset scripts are small python scripts that define dataset builders. They define the citation, info and format of the dataset,
+ contain the path or URL to the original data files and the code to load examples from the original data files.
+
+ You can find the complete list of datasets in the Datasets [Hub](https://huggingface.co/datasets).
+
+ 2. Run the dataset script which will:
+
+ * Download the dataset file from the original URL (see the script) if it's not already available locally or cached.
+ * Process the dataset and cache it in typed Arrow tables.
+
+ Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python generic types.
+ They can be directly accessed from disk, loaded in RAM or even streamed over the web.
+
+ 3. Return a dataset built from the requested splits in `split` (default: all).
+
+ It also allows loading a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
+ In this case, it automatically loads all the data files from the directory or the dataset repository.
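+
+ Loading without streaming is roughly equivalent to the lower-level calls below (a sketch of what this function does internally):
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ ```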
+
+ Args:
+
+ path (`str`):
+ Path or name of the dataset.
+ Depending on `path`, the dataset builder that is used comes from a generic dataset script (JSON, CSV, Parquet, text etc.) or from the dataset script (a python file) inside the dataset directory.
+
+ For local datasets:
+
+ - if `path` is a local directory (containing data files only)
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
+ e.g. `'./path/to/directory/with/my/csv/data'`.
+ - if `path` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+
+ For datasets on the Hugging Face Hub (list all available datasets with [`huggingface_hub.list_datasets`])
+
+ - if `path` is a dataset repository on the HF hub (containing data files only)
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
+ e.g. `'username/dataset_name'`, a dataset repository on the HF hub containing your data files.
+ - if `path` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
+ -> load the dataset builder from the dataset script in the dataset repository
+ e.g. `glue`, `squad`, `'username/dataset_name'`, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
+
+ name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_dir (`str`, *optional*):
+ Defining the `data_dir` of the dataset configuration. If specified for the generic builders (csv, text etc.) or the Hub datasets and `data_files` is `None`,
+ the behavior is equal to passing `os.path.join(data_dir, **)` as `data_files` to reference all the files in a directory.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ split (`Split` or `str`):
+ Which split of the data to load.
+ If `None`, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
+ If given, will return a single Dataset.
+ Splits can be combined and specified like in tensorflow-datasets.
+ cache_dir (`str`, *optional*):
+ Directory to read/write data. Defaults to `"~/.cache/huggingface/datasets"`.
+ features (`Features`, *optional*):
+ Set the features type to use for this dataset.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+ save_infos (`bool`, defaults to `False`):
+ Save the dataset information (checksums/size/splits/...).
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ task (`str`):
+ The task to prepare the dataset for during training and evaluation. Casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+
+ `task` was deprecated in version 2.13.0 and will be removed in 3.0.0.
+
+ streaming (`bool`, defaults to `False`):
+ If set to `True`, don't download the data files. Instead, it streams the data progressively while
+ iterating on the dataset. An [`IterableDataset`] or [`IterableDatasetDict`] is returned instead in this case.
+
+ Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example.
+ Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats
+ like rar and xz are not yet supported. The tgz format doesn't allow streaming.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+ storage_options (`dict`, *optional*, defaults to `None`):
+ **Experimental**. Key/value pairs to be passed on to the dataset file-system backend, if any.
+ trust_remote_code (`bool`, defaults to `True`):
+ Whether or not to allow for datasets defined on the Hub using a dataset script. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+
+ `trust_remote_code` will default to False in the next major release.
+
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the `BuilderConfig`
+ and used in the [`DatasetBuilder`].
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - if `split` is not `None`: the dataset requested,
+ - if `split` is `None`, a [`~datasets.DatasetDict`] with each split.
+
+ or [`IterableDataset`] or [`IterableDatasetDict`]: if `streaming=True`
+
+ - if `split` is not `None`, the dataset requested,
+ - if `split` is `None`, a [`~datasets.streaming.IterableDatasetDict`] with each split.
+
+ Example:
+
+ Load a dataset from the Hugging Face Hub:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train')
+
+ # Map data files to splits
+ >>> data_files = {'train': 'train.csv', 'test': 'test.csv'}
+ >>> ds = load_dataset('namespace/your_dataset_name', data_files=data_files)
+ ```
+
+ Load a local dataset:
+
+ ```py
+ # Load a CSV file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('csv', data_files='path/to/local/my_dataset.csv')
+
+ # Load a JSON file
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('json', data_files='path/to/local/my_dataset.json')
+
+ # Load from a local loading script
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('path/to/local/loading_script/loading_script.py', split='train')
+ ```
+
+ Load an [`~datasets.IterableDataset`]:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='train', streaming=True)
+ ```
+
+ Load an image dataset with the `ImageFolder` dataset builder:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('imagefolder', data_dir='/path/to/images', split='train')
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if task != "deprecated":
+ warnings.warn(
+ "'task' was deprecated in version 2.13.0 and will be removed in 3.0.0.\n",
+ FutureWarning,
+ )
+ else:
+ task = None
+ if data_files is not None and not data_files:
+ raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).")
+ if Path(path, config.DATASET_STATE_JSON_FILENAME).exists():
+ raise ValueError(
+ "You are trying to load a dataset that was saved using `save_to_disk`. "
+ "Please use `load_from_disk` instead."
+ )
+
+ if streaming and num_proc is not None:
+ raise NotImplementedError(
+ "Loading a streaming dataset in parallel with `num_proc` is not implemented. "
+ "To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead."
+ )
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(
+ (verification_mode or VerificationMode.BASIC_CHECKS) if not save_infos else VerificationMode.ALL_CHECKS
+ )
+
+ # Create a dataset builder
+ builder_instance = load_dataset_builder(
+ path=path,
+ name=name,
+ data_dir=data_dir,
+ data_files=data_files,
+ cache_dir=cache_dir,
+ features=features,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ storage_options=storage_options,
+ trust_remote_code=trust_remote_code,
+ _require_default_config_name=name is None,
+ **config_kwargs,
+ )
+
+ # Return iterable dataset in case of streaming
+ if streaming:
+ return builder_instance.as_streaming_dataset(split=split)
+
+ # Download and prepare data
+ builder_instance.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ # Build dataset for splits
+ keep_in_memory = (
+ keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
+ )
+ ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
+ # Rename and cast features to match task schema
+ if task is not None:
+ # To avoid issuing the same warning twice
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", FutureWarning)
+ ds = ds.prepare_for_task(task)
+ if save_infos:
+ builder_instance._save_infos()
+
+ return ds
+
+
+def load_from_disk(
+ dataset_path: str, fs="deprecated", keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None
+) -> Union[Dataset, DatasetDict]:
+ """
+ Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or
+ from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be
+ loaded from.
+ fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+ `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the dataset
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
+ nonzero. See more details in the [improve performance](../cache#improve-performance) section.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory: the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
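+
+ If the dataset was saved to a remote filesystem, pass the filesystem options through `storage_options` (a sketch; the bucket name and the `anon` option below are illustrative):
+
+ ```py
+ >>> from datasets import load_from_disk
+ >>> ds = load_from_disk('s3://my-bucket/dataset/train', storage_options={"anon": False})
+ ```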
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, *_ = url_to_fs(dataset_path, **(storage_options or {}))
+ if not fs.exists(dataset_path):
+ raise FileNotFoundError(f"Directory {dataset_path} not found")
+ if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ ):
+ return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)):
+ return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+ else:
+ raise FileNotFoundError(
+ f"Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory."
+ )
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/metric.py b/llmeval-env/lib/python3.10/site-packages/datasets/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..187c5e5c925b71b26ca83021523dd55c28989d28
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/metric.py
@@ -0,0 +1,652 @@
+# Copyright 2020 The HuggingFace Datasets Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Metrics base class."""
+
+import os
+import types
+import uuid
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+from filelock import BaseFileLock, Timeout
+
+from . import config
+from .arrow_dataset import Dataset
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager
+from .features import Features
+from .info import DatasetInfo, MetricInfo
+from .naming import camelcase_to_snakecase
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+from .utils.py_utils import copyfunc, temp_seed
+
+
+logger = get_logger(__name__)
+
+
+class FileFreeLock(BaseFileLock):
+ """Thread lock until a file **cannot** be locked"""
+
+ def __init__(self, lock_file, *args, **kwargs):
+ self.filelock = FileLock(lock_file)
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
+
+ def _acquire(self):
+ try:
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
+ except Timeout:
+ # We couldn't acquire the lock, the file is locked!
+ self._context.lock_file_fd = self.filelock.lock_file
+ else:
+ # We were able to acquire the lock, the file is not yet locked!
+ self.filelock.release()
+ self._context.lock_file_fd = None
+
+ def _release(self):
+ self._context.lock_file_fd = None
+
+
+# lists - summarize long lists similarly to NumPy
+# arrays/tensors - let the frameworks control formatting
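+# For illustration: a 10-element list such as list(range(10)) is rendered as
+# "[0, 1, 2, ..., 7, 8, 9]", while lists of 6 or fewer elements are printed in full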
+def summarize_if_long_list(obj):
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
+ return f"{obj}"
+
+ def format_chunk(chunk):
+ return ", ".join(repr(x) for x in chunk)
+
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
+
+
+class MetricInfoMixin:
+ """This base class exposes some attributes of MetricInfo
+ at the base level of the Metric for easy access.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ """
+
+ def __init__(self, info: MetricInfo):
+ self._metric_info = info
+
+ @property
+ def info(self):
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
+ return self._metric_info
+
+ @property
+ def name(self) -> str:
+ return self._metric_info.metric_name
+
+ @property
+ def experiment_id(self) -> Optional[str]:
+ return self._metric_info.experiment_id
+
+ @property
+ def description(self) -> str:
+ return self._metric_info.description
+
+ @property
+ def citation(self) -> str:
+ return self._metric_info.citation
+
+ @property
+ def features(self) -> Features:
+ return self._metric_info.features
+
+ @property
+ def inputs_description(self) -> str:
+ return self._metric_info.inputs_description
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._metric_info.homepage
+
+ @property
+ def license(self) -> str:
+ return self._metric_info.license
+
+ @property
+ def codebase_urls(self) -> Optional[List[str]]:
+ return self._metric_info.codebase_urls
+
+ @property
+ def reference_urls(self) -> Optional[List[str]]:
+ return self._metric_info.reference_urls
+
+ @property
+ def streamable(self) -> bool:
+ return self._metric_info.streamable
+
+ @property
+ def format(self) -> Optional[str]:
+ return self._metric_info.format
+
+
+class Metric(MetricInfoMixin):
+ """A Metric is the base class and common API for all metrics.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ Args:
+ config_name (``str``): This is used to define a hash specific to a metric computation script and prevents the metric's data
+ from being overridden when the metric loading script is modified.
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
+ The data directory should be located on a shared file-system in distributed setups.
+ num_process (``int``): specify the total number of nodes in a distributed setting.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
+ timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
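+
+ Example (a minimal sketch of distributed usage; `rank`, `world_size`, `preds` and `refs` are placeholders provided by your own training setup):
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy", num_process=world_size, process_id=rank, experiment_id="my_experiment")
+ >>> metric.add_batch(predictions=preds, references=refs)
+ >>> score = metric.compute()  # dict on the main process (process_id == 0), None on the others
+ ```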
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ config_name: Optional[str] = None,
+ keep_in_memory: bool = False,
+ cache_dir: Optional[str] = None,
+ num_process: int = 1,
+ process_id: int = 0,
+ seed: Optional[int] = None,
+ experiment_id: Optional[str] = None,
+ max_concurrent_cache_files: int = 10000,
+ timeout: Union[int, float] = 100,
+ **kwargs,
+ ):
+ # prepare info
+ self.config_name = config_name or "default"
+ info = self._info()
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
+ info.config_name = self.config_name
+ info.experiment_id = experiment_id or "default_experiment"
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
+
+ # Safety checks on num_process and process_id
+ if not isinstance(process_id, int) or process_id < 0:
+ raise ValueError("'process_id' should be a number greater than 0")
+ if not isinstance(num_process, int) or num_process <= process_id:
+ raise ValueError("'num_process' should be a number greater than process_id")
+ if keep_in_memory and num_process != 1:
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
+
+ self.num_process = num_process
+ self.process_id = process_id
+ self.max_concurrent_cache_files = max_concurrent_cache_files
+
+ self.keep_in_memory = keep_in_memory
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
+ self.data_dir = self._build_data_dir()
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
+ else:
+ self.seed: int = seed
+ self.timeout: Union[int, float] = timeout
+
+ # Update 'compute' and 'add' docstring
+ # methods need to be copied otherwise it changes the docstrings of every instance
+ self.compute = types.MethodType(copyfunc(self.compute), self)
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
+ self.add = types.MethodType(copyfunc(self.add), self)
+ self.compute.__func__.__doc__ += self.info.inputs_description
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
+ self.add.__func__.__doc__ += self.info.inputs_description
+
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
+ self.buf_writer = None
+ self.writer = None
+ self.writer_batch_size = None
+ self.data = None
+
+ # This is the cache file we store our predictions/references in
+ # Keep it None for now so we can (cloud)pickle the object
+ self.cache_file_name = None
+ self.filelock = None
+ self.rendez_vous_lock = None
+
+ # This is all the cache files on which we have a lock when we are in a distributed setting
+ self.file_paths = None
+ self.filelocks = None
+
+ def __len__(self):
+ """Return the number of examples (predictions or predictions/references pair)
+ currently stored in the metric's cache.
+ """
+ return 0 if self.writer is None else len(self.writer)
+
+ def __repr__(self):
+ return (
+ f'Metric(name: "{self.name}", features: {self.features}, '
+ f'usage: """{self.inputs_description}""", '
+ f"stored examples: {len(self)})"
+ )
+
+ def _build_data_dir(self):
+ """Path of this metric in cache_dir:
+ Will be:
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
+ If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
+ """
+ builder_data_dir = self._data_dir_root
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
+ os.makedirs(builder_data_dir, exist_ok=True)
+ return builder_data_dir
+
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
+ """Create a new cache file. If the default cache file is used, we generated a new hash."""
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
+ filelock = None
+ for i in range(self.max_concurrent_cache_files):
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=timeout)
+ except Timeout:
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup),
+ # We raise an error
+ if self.num_process != 1:
+ raise ValueError(
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+ if i == self.max_concurrent_cache_files - 1:
+ raise ValueError(
+ f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
+ f"(current value is {self.max_concurrent_cache_files})."
+ ) from None
+ # In other cases (allow to find new file name + not yet at max num of attempts) we can try to sample a new hashing name.
+ file_uuid = str(uuid.uuid4())
+ file_path = os.path.join(
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
+ )
+ else:
+ break
+
+ return file_path, filelock
+
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
+ """Get a lock on all the cache files in a distributed setup.
+ We wait up to `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
+ """
+ if self.num_process == 1:
+ if self.cache_file_name is None:
+ raise ValueError(
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
+ "at least once before calling `compute`."
+ )
+ file_paths = [self.cache_file_name]
+ else:
+ file_paths = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
+ for process_id in range(self.num_process)
+ ]
+
+ # Let's acquire a lock on each process files to be sure they are finished writing
+ filelocks = []
+ for process_id, file_path in enumerate(file_paths):
+ if process_id == 0: # process 0 already has its lock file
+ filelocks.append(self.filelock)
+ else:
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
+ ) from None
+ else:
+ filelocks.append(filelock)
+
+ return file_paths, filelocks
+
+ def _check_all_processes_locks(self):
+ expected_lock_file_names = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
+ for process_id in range(self.num_process)
+ ]
+ for expected_lock_file_name in expected_lock_file_names:
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+
+ def _check_rendez_vous(self):
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ rendez_vous_lock = FileLock(lock_file_name)
+ try:
+ rendez_vous_lock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
+ else:
+ rendez_vous_lock.release()
+
+ def _finalize(self):
+ """Close all the writing process and load/gather the data
+ from all the nodes if main node or all_process is True.
+ """
+ if self.writer is not None:
+ self.writer.finalize()
+ self.writer = None
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
+ if self.filelock is not None and self.process_id > 0:
+ self.filelock.release()
+
+ if self.keep_in_memory:
+ # Read the predictions and references
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
+
+ elif self.process_id == 0:
+ # Let's acquire a lock on each node files to be sure they are finished writing
+ file_paths, filelocks = self._get_all_cache_files()
+
+ # Read the predictions and references
+ try:
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
+ except FileNotFoundError:
+ raise ValueError(
+ "Error in finalize: another metric instance is already using the local cache file. "
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
+ ) from None
+
+ # Store file paths and locks and we will release/delete them after the computation.
+ self.file_paths = file_paths
+ self.filelocks = filelocks
+
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
+ """Compute the metrics.
+
+ Usage of positional arguments is not allowed to prevent mistakes.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+ **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
+ method (see details in the docstring).
+
+ Return:
+ dict or None
+
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
+ - None if the metric is not run on the main process (``process_id != 0``).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
+ ```
+ """
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
+ if predictions is None and references is None:
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
+ all_kwargs.update(missing_kwargs)
+ else:
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
+ if missing_inputs:
+ raise ValueError(
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
+ )
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
+
+ if any(v is not None for v in inputs.values()):
+ self.add_batch(**inputs)
+ self._finalize()
+
+ self.cache_file_name = None
+ self.filelock = None
+
+ if self.process_id == 0:
+ self.data.set_format(type=self.info.format)
+
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
+ with temp_seed(self.seed):
+ output = self._compute(**inputs, **compute_kwargs)
+
+ if self.buf_writer is not None:
+ self.buf_writer = None
+ del self.data
+ self.data = None
+ else:
+ # Release locks and delete all the cache files. Process 0 is released last.
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
+ logger.info(f"Removing {file_path}")
+ del self.data
+ self.data = None
+ del self.writer
+ self.writer = None
+ os.remove(file_path)
+ filelock.release()
+
+ return output
+ else:
+ return None
+
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
+ """Add a batch of predictions and references for the metric's stack.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ batch = {"predictions": predictions, "references": references, **kwargs}
+ batch = {input_name: batch[input_name] for input_name in self.features}
+ batch = self.info.features.encode_batch(batch)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write_batch(batch)
+ except pa.ArrowInvalid:
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
+ col0 = next(iter(batch))
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
+ error_msg = (
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
+ )
+ elif sorted(self.features) != ["predictions", "references"]:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ else:
+ error_msg = (
+ f"Predictions and/or references don't match the expected format.\n"
+ f"Expected format: {self.features},\n"
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
+ f"Input references: {summarize_if_long_list(references)}"
+ )
+ raise ValueError(error_msg) from None
+
+ def add(self, *, prediction=None, reference=None, **kwargs):
+ """Add one prediction and reference for the metric's stack.
+
+ Args:
+ prediction (list/array/tensor, optional): Prediction.
+ reference (list/array/tensor, optional): Reference.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add(prediction=model_prediction, reference=label)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ example = {"predictions": prediction, "references": reference, **kwargs}
+ example = {input_name: example[input_name] for input_name in self.features}
+ example = self.info.features.encode_example(example)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write(example)
+ except pa.ArrowInvalid:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ raise ValueError(error_msg) from None
+
+ def _init_writer(self, timeout=1):
+ if self.num_process > 1:
+ if self.process_id == 0:
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ self.rendez_vous_lock = FileLock(file_path)
+ try:
+ self.rendez_vous_lock.acquire(timeout=timeout)
+ except TimeoutError:
+ raise ValueError(
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+
+ if self.keep_in_memory:
+ self.buf_writer = pa.BufferOutputStream()
+ self.writer = ArrowWriter(
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
+ )
+ else:
+ self.buf_writer = None
+
+ # Get cache file name and lock it
+ if self.cache_file_name is None or self.filelock is None:
+ cache_file_name, filelock = self._create_cache_file() # get ready
+ self.cache_file_name = cache_file_name
+ self.filelock = filelock
+
+ self.writer = ArrowWriter(
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
+ )
+ # Setup rendez-vous here if needed (distributed setting)
+ if self.num_process > 1:
+ if self.process_id == 0:
+ self._check_all_processes_locks() # wait for everyone to be ready
+ self.rendez_vous_lock.release() # let everyone go
+ else:
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
+
+ def _info(self) -> MetricInfo:
+ """Construct the MetricInfo object. See `MetricInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (MetricInfo) The metrics information
+ """
+ raise NotImplementedError
+
+ def download_and_prepare(
+ self,
+ download_config: Optional[DownloadConfig] = None,
+ dl_manager: Optional[DownloadManager] = None,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
+ """
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig()
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
+ download_config.force_download = False
+
+ dl_manager = DownloadManager(
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
+ )
+
+ self._download_and_prepare(dl_manager)
+
+ def _download_and_prepare(self, dl_manager):
+ """Downloads and prepares resources for the metric.
+
+ This is the internal implementation to override; it is called when the user calls
+ `download_and_prepare`. It should download all the required resources for the metric.
+
+ Args:
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
+ """
+ return None
+
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
+ """This method defines the common API for all the metrics in the library"""
+ raise NotImplementedError
+
+ def __del__(self):
+ if hasattr(self, "filelock") and self.filelock is not None:
+ self.filelock.release()
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
+ self.rendez_vous_lock.release()
+ if hasattr(self, "writer"): # in case it was already deleted
+ del self.writer
+ if hasattr(self, "data"): # in case it was already deleted
+ del self.data
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/naming.py b/llmeval-env/lib/python3.10/site-packages/datasets/naming.py
new file mode 100644
index 0000000000000000000000000000000000000000..65e7ede10dcde8701823223ae98e7971f705f945
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/naming.py
@@ -0,0 +1,84 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Utilities for file names."""
+
+import itertools
+import os
+import re
+
+
+_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
+_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
+
+_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
+_multiple_underscores_re = re.compile(r"(_{2,})")
+
+_split_re = r"^\w+(\.\w+)*$"
+
+INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
+
+
+def camelcase_to_snakecase(name):
+ """Convert camel-case string to snake-case."""
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
+ return name.lower()
+
+
+def snakecase_to_camelcase(name):
+ """Convert snake-case string to camel-case string."""
+ name = _single_underscore_re.split(name)
+ name = [_multiple_underscores_re.split(n) for n in name]
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
+
+
+def filename_prefix_for_name(name):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ return camelcase_to_snakecase(name)
+
+
+def filename_prefix_for_split(name, split):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
+ return f"{filename_prefix_for_name(name)}-{split}"
+
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ if filetype_suffix:
+ prefix += f".{filetype_suffix}"
+ filepath = os.path.join(data_dir, prefix)
+ return f"{filepath}*"
+
+
+def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ prefix = os.path.join(path, prefix)
+
+ if shard_lengths:
+ num_shards = len(shard_lengths)
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
+ if filetype_suffix:
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
+ return filenames
+ else:
+ filename = prefix
+ if filetype_suffix:
+ filename += f".{filetype_suffix}"
+ return [filename]
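+
+
+# For illustration: with shard_lengths=[100, 100] and filetype_suffix="arrow",
+# filenames_for_dataset_split("/cache", "my_dataset", "train", "arrow", [100, 100])
+# returns ["/cache/my_dataset-train-00000-of-00002.arrow", "/cache/my_dataset-train-00001-of-00002.arrow"]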
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3889669bfe43ac1ee0a45066d6e676fad105fc35
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..390fe1068d80a9e03ec4040a0880f2b8c006793d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6bdc2d2afb8cd7e9091cc13d7251ee75971a0bf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f6f465c8b68b35a2c64703040c66a7f3457caa5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..9085b22078b6010ce1e4137573b5b884f56f487b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py
@@ -0,0 +1,207 @@
+import glob
+import json
+import os
+import shutil
+import time
+import warnings
+from pathlib import Path
+from typing import List, Optional, Tuple, Union
+
+import pyarrow as pa
+
+import datasets
+import datasets.config
+import datasets.data_files
+from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+def _get_modification_time(cached_directory_path):
+ return (Path(cached_directory_path)).stat().st_mtime
+
+
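+# Cache layout assumed by the lookup below (as implied by the glob pattern used in
+# this helper): <cache_dir>/<namespace>___<dataset_name>/<config_id>/<version>/<hash>/,
+# each leaf directory containing a dataset_info.json file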
+def _find_hash_in_cache(
+ dataset_name: str,
+ config_name: Optional[str],
+ cache_dir: Optional[str],
+ config_kwargs: dict,
+ custom_features: Optional[datasets.Features],
+) -> Tuple[str, str, str]:
+ if config_name or config_kwargs or custom_features:
+ config_id = datasets.BuilderConfig(config_name or "default").create_config_id(
+ config_kwargs=config_kwargs, custom_features=custom_features
+ )
+ else:
+ config_id = None
+ cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE))
+ namespace_and_dataset_name = dataset_name.split("/")
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
+ cached_relative_path = "___".join(namespace_and_dataset_name)
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(
+ os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*")
+ )
+ if os.path.isdir(cached_directory_path)
+ and (
+ config_kwargs
+ or custom_features
+ or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
+ == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name
+ )
+ ]
+ if not cached_directory_paths:
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
+ if os.path.isdir(cached_directory_path)
+ ]
+ available_configs = sorted(
+ {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}
+ )
+ raise ValueError(
+ f"Couldn't find cache for {dataset_name}"
+ + (f" for config '{config_id}'" if config_id else "")
+ + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "")
+ )
+ # get most recent
+ cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1])
+ version, hash = cached_directory_path.parts[-2:]
+ other_configs = [
+ Path(_cached_directory_path).parts[-3]
+ for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash))
+ if os.path.isdir(_cached_directory_path)
+ and (
+ config_kwargs
+ or custom_features
+ or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
+ == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name
+ )
+ ]
+ if not config_id and len(other_configs) > 1:
+ raise ValueError(
+ f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}"
+ f"\nPlease specify which configuration to reload from the cache, e.g."
+ f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')"
+ )
+ config_name = cached_directory_path.parts[-3]
+ warning_msg = (
+ f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} "
+ f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})."
+ )
+ logger.warning(warning_msg)
+ return config_name, version, hash
+
+
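+# This builder reloads a dataset that was already generated in the local cache:
+# with version="auto" and hash="auto", the most recently modified matching cache
+# directory is located via _find_hash_in_cache instead of rebuilding the dataset
+# from its original source.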
+class Cache(datasets.ArrowBasedBuilder):
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ version: Optional[str] = "0.0.0",
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[datasets.DatasetInfo] = None,
+ features: Optional[datasets.Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ if repo_id is None and dataset_name is None:
+ raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ if hash == "auto" and version == "auto":
+ config_name, version, hash = _find_hash_in_cache(
+ dataset_name=repo_id or dataset_name,
+ config_name=config_name,
+ cache_dir=cache_dir,
+ config_kwargs=config_kwargs,
+ custom_features=features,
+ )
+ elif hash == "auto" or version == "auto":
+ raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
+ super().__init__(
+ cache_dir=cache_dir,
+ dataset_name=dataset_name,
+ config_name=config_name,
+ version=version,
+ hash=hash,
+ base_path=base_path,
+ info=info,
+ token=token,
+ repo_id=repo_id,
+ storage_options=storage_options,
+ writer_batch_size=writer_batch_size,
+ )
+
+ def _info(self) -> datasets.DatasetInfo:
+ return datasets.DatasetInfo()
+
+ def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
+ if not os.path.exists(self.cache_dir):
+ raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
+ if output_dir is not None and output_dir != self.cache_dir:
+ shutil.copytree(self.cache_dir, output_dir)
+
+ def _split_generators(self, dl_manager):
+ # used to stream from cache
+ if isinstance(self.info.splits, datasets.SplitDict):
+ split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
+ else:
+ raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
+ return [
+ datasets.SplitGenerator(
+ name=split_info.name,
+ gen_kwargs={
+ "files": filenames_for_dataset_split(
+ self.cache_dir,
+ dataset_name=self.dataset_name,
+ split=split_info.name,
+ filetype_suffix="arrow",
+ shard_lengths=split_info.shard_lengths,
+ )
+ },
+ )
+ for split_info in split_infos
+ ]
+
+ def _generate_tables(self, files):
+ # used to stream from cache
+ for file_idx, file in enumerate(files):
+ with open(file, "rb") as f:
+ try:
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", pa_table
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0c735fad4f585a1e3858becd0630289563f0b4c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..556d77a1bed1277b21e05a7ced078f6e4aed499b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..24c32a746e8c15b23d048b39f7a88447ed0a1b2a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
@@ -0,0 +1,406 @@
+import collections
+import itertools
+import os
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Type
+
+import pandas as pd
+import pyarrow as pa
+import pyarrow.json as paj
+
+import datasets
+from datasets.features.features import FeatureType
+from datasets.tasks.base import TaskTemplate
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+def count_path_segments(path):
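+ # e.g. "data/train/cat/img0.jpg" -> 3 (number of "/" separators after normalizing "\\" to "/")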
+ return path.replace("\\", "/").count("/")
+
+
+@dataclass
+class FolderBasedBuilderConfig(datasets.BuilderConfig):
+ """BuilderConfig for AutoFolder."""
+
+ features: Optional[datasets.Features] = None
+ drop_labels: bool = None
+ drop_metadata: bool = None
+
+
+class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
+ """
+ Base class for generic data loaders for image and audio data.
+
+
+ Abstract class attributes to be overridden by a child class:
+ BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
+ BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
+ BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
+ EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
+ will be included in a dataset)
+ CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
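+
+ A minimal child-class sketch (illustrative names; the feature, extensions and task below
+ are assumptions loosely modeled on the image folder loader):
+
+ class MyImageFolder(FolderBasedBuilder):
+ BASE_FEATURE = datasets.Image
+ BASE_COLUMN_NAME = "image"
+ BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
+ EXTENSIONS = [".jpg", ".png"]
+ CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")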
+ """
+
+ BASE_FEATURE: Type[FeatureType]
+ BASE_COLUMN_NAME: str
+ BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
+ EXTENSIONS: List[str]
+ CLASSIFICATION_TASK: TaskTemplate
+
+ METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ # Do an early pass if:
+ # * `drop_labels` is None (default) or False, to infer the class labels
+ # * `drop_metadata` is None (default) or False, to find the metadata files
+ do_analyze = not self.config.drop_labels or not self.config.drop_metadata
+ labels, path_depths = set(), set()
+ metadata_files = collections.defaultdict(set)
+
+ def analyze(files_or_archives, downloaded_files_or_dirs, split):
+ if len(downloaded_files_or_dirs) == 0:
+ return
+ # The files are separated from the archives at this point, so check the first sample
+ # to see if it's a file or a directory and iterate accordingly
+ if os.path.isfile(downloaded_files_or_dirs[0]):
+ original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
+ for original_file, downloaded_file in zip(original_files, downloaded_files):
+ original_file, downloaded_file = str(original_file), str(downloaded_file)
+ _, original_file_ext = os.path.splitext(original_file)
+ if original_file_ext.lower() in self.EXTENSIONS:
+ if not self.config.drop_labels:
+ labels.add(os.path.basename(os.path.dirname(original_file)))
+ path_depths.add(count_path_segments(original_file))
+ elif os.path.basename(original_file) in self.METADATA_FILENAMES:
+ metadata_files[split].add((original_file, downloaded_file))
+ else:
+ original_file_name = os.path.basename(original_file)
+ logger.debug(
+                            f"The file '{original_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
+ )
+ else:
+ archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
+ for archive, downloaded_dir in zip(archives, downloaded_dirs):
+ archive, downloaded_dir = str(archive), str(downloaded_dir)
+ for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
+ _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
+ if downloaded_dir_file_ext in self.EXTENSIONS:
+ if not self.config.drop_labels:
+ labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
+ path_depths.add(count_path_segments(downloaded_dir_file))
+ elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
+ metadata_files[split].add((None, downloaded_dir_file))
+ else:
+ archive_file_name = os.path.basename(archive)
+ original_file_name = os.path.basename(downloaded_dir_file)
+ logger.debug(
+ f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
+ )
+
+ data_files = self.config.data_files
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ files, archives = self._split_files_and_archives(files)
+ downloaded_files = dl_manager.download(files)
+ downloaded_dirs = dl_manager.download_and_extract(archives)
+ if do_analyze: # drop_metadata is None or False, drop_labels is None or False
+ logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
+ analyze(files, downloaded_files, split_name)
+ analyze(archives, downloaded_dirs, split_name)
+
+ if metadata_files:
+ # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
+ add_metadata = not self.config.drop_metadata
+                # if `metadata_files` are found, add labels only if
+                # `drop_labels` is explicitly set to False (non-default behavior)
+ add_labels = self.config.drop_labels is False
+ else:
+ # if `metadata_files` are not found, don't add metadata
+ add_metadata = False
+                # if `metadata_files` are not found and `drop_labels` is None (default),
+                # add labels only if files are on the same level of the directory hierarchy and there is more than one label
+ add_labels = (
+ (len(labels) > 1 and len(path_depths) == 1)
+ if self.config.drop_labels is None
+ else not self.config.drop_labels
+ )
+
+ if add_labels:
+ logger.info("Adding the labels inferred from data directories to the dataset's features...")
+ if add_metadata:
+ logger.info("Adding metadata to the dataset...")
+ else:
+ add_labels, add_metadata, metadata_files = False, False, {}
+
+ splits.append(
+ datasets.SplitGenerator(
+ name=split_name,
+ gen_kwargs={
+ "files": list(zip(files, downloaded_files))
+ + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
+ "metadata_files": metadata_files,
+ "split_name": split_name,
+ "add_labels": add_labels,
+ "add_metadata": add_metadata,
+ },
+ )
+ )
+
+ if add_metadata:
+ # Verify that:
+ # * all metadata files have the same set of features
+ # * the `file_name` key is one of the metadata keys and is of type string
+ features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
+
+ # Check that all metadata files share the same format
+ metadata_ext = {
+ os.path.splitext(original_metadata_file)[-1]
+ for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
+ }
+ if len(metadata_ext) > 1:
+ raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
+ metadata_ext = metadata_ext.pop()
+
+ for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
+ pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
+ features_per_metadata_file.append(
+ (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
+ )
+ for downloaded_metadata_file, metadata_features in features_per_metadata_file:
+ if metadata_features != features_per_metadata_file[0][1]:
+ raise ValueError(
+                        f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0][1]} != {metadata_features}"
+ )
+ metadata_features = features_per_metadata_file[0][1]
+ if "file_name" not in metadata_features:
+ raise ValueError("`file_name` must be present as dictionary key in metadata files")
+ if metadata_features["file_name"] != datasets.Value("string"):
+ raise ValueError("`file_name` key must be a string")
+ del metadata_features["file_name"]
+ else:
+ metadata_features = None
+
+ # Normally, we would do this in _info, but we need to know the labels and/or metadata
+ # before building the features
+ if self.config.features is None:
+ if add_labels:
+ self.info.features = datasets.Features(
+ {
+ self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
+ "label": datasets.ClassLabel(names=sorted(labels)),
+ }
+ )
+ self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
+ else:
+ self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
+
+ if add_metadata:
+ # Warn if there are duplicated keys in metadata compared to the existing features
+ # (`BASE_COLUMN_NAME`, optionally "label")
+ duplicated_keys = set(self.info.features) & set(metadata_features)
+ if duplicated_keys:
+ logger.warning(
+ f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
+ f"the features dictionary."
+ )
+ # skip metadata duplicated keys
+ self.info.features.update(
+ {
+ feature: metadata_features[feature]
+ for feature in metadata_features
+ if feature not in duplicated_keys
+ }
+ )
+
+ return splits
+
+ def _split_files_and_archives(self, data_files):
+ files, archives = [], []
+ for data_file in data_files:
+ _, data_file_ext = os.path.splitext(data_file)
+ if data_file_ext.lower() in self.EXTENSIONS:
+ files.append(data_file)
+ elif os.path.basename(data_file) in self.METADATA_FILENAMES:
+ files.append(data_file)
+ else:
+ archives.append(data_file)
+ return files, archives
+
+ def _read_metadata(self, metadata_file, metadata_ext: str = ""):
+ if metadata_ext == ".csv":
+            # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` so that CSV parsing stays consistent with the CSV packaged module
+ return pa.Table.from_pandas(pd.read_csv(metadata_file))
+ else:
+ with open(metadata_file, "rb") as f:
+ return paj.read_json(f)
+
+ def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
+ split_metadata_files = metadata_files.get(split_name, [])
+ sample_empty_metadata = (
+ {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
+ )
+ last_checked_dir = None
+ metadata_dir = None
+ metadata_dict = None
+ downloaded_metadata_file = None
+
+ metadata_ext = ""
+ if split_metadata_files:
+ metadata_ext = {
+ os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
+ }
+ metadata_ext = metadata_ext.pop()
+
+ file_idx = 0
+ for original_file, downloaded_file_or_dir in files:
+ if original_file is not None:
+ _, original_file_ext = os.path.splitext(original_file)
+ if original_file_ext.lower() in self.EXTENSIONS:
+ if add_metadata:
+                        # If the file has one of the allowed extensions and we've just entered a new directory,
+                        # find the nearest metadata file (by counting path segments) for that directory
+ current_dir = os.path.dirname(original_file)
+ if last_checked_dir is None or last_checked_dir != current_dir:
+ last_checked_dir = current_dir
+ metadata_file_candidates = [
+ (
+ os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
+ metadata_file_candidate,
+ downloaded_metadata_file,
+ )
+ for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
+ if metadata_file_candidate
+ is not None # ignore metadata_files that are inside archives
+ and not os.path.relpath(
+ original_file, os.path.dirname(metadata_file_candidate)
+ ).startswith("..")
+ ]
+ if metadata_file_candidates:
+ _, metadata_file, downloaded_metadata_file = min(
+ metadata_file_candidates, key=lambda x: count_path_segments(x[0])
+ )
+ pa_metadata_table = self._read_metadata(
+ downloaded_metadata_file, metadata_ext=metadata_ext
+ )
+ pa_file_name_array = pa_metadata_table["file_name"]
+ pa_metadata_table = pa_metadata_table.drop(["file_name"])
+ metadata_dir = os.path.dirname(metadata_file)
+ metadata_dict = {
+ os.path.normpath(file_name).replace("\\", "/"): sample_metadata
+ for file_name, sample_metadata in zip(
+ pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
+ )
+ }
+ else:
+ raise ValueError(
+                                    f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
+ )
+ if metadata_dir is not None and downloaded_metadata_file is not None:
+ file_relpath = os.path.relpath(original_file, metadata_dir)
+ file_relpath = file_relpath.replace("\\", "/")
+ if file_relpath not in metadata_dict:
+ raise ValueError(
+ f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
+ )
+ sample_metadata = metadata_dict[file_relpath]
+ else:
+ raise ValueError(
+                                f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
+ )
+ else:
+ sample_metadata = {}
+ if add_labels:
+ sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
+ else:
+ sample_label = {}
+ yield (
+ file_idx,
+ {
+ **sample_empty_metadata,
+ self.BASE_COLUMN_NAME: downloaded_file_or_dir,
+ **sample_metadata,
+ **sample_label,
+ },
+ )
+ file_idx += 1
+ else:
+ for downloaded_dir_file in downloaded_file_or_dir:
+ _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
+ if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
+ if add_metadata:
+ current_dir = os.path.dirname(downloaded_dir_file)
+ if last_checked_dir is None or last_checked_dir != current_dir:
+ last_checked_dir = current_dir
+ metadata_file_candidates = [
+ (
+ os.path.relpath(
+ downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
+ ),
+ metadata_file_candidate,
+ downloaded_metadata_file,
+ )
+ for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
+ if metadata_file_candidate
+ is None # ignore metadata_files that are not inside archives
+ and not os.path.relpath(
+ downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
+ ).startswith("..")
+ ]
+ if metadata_file_candidates:
+ _, metadata_file, downloaded_metadata_file = min(
+ metadata_file_candidates, key=lambda x: count_path_segments(x[0])
+ )
+ pa_metadata_table = self._read_metadata(
+ downloaded_metadata_file, metadata_ext=metadata_ext
+ )
+ pa_file_name_array = pa_metadata_table["file_name"]
+ pa_metadata_table = pa_metadata_table.drop(["file_name"])
+ metadata_dir = os.path.dirname(downloaded_metadata_file)
+ metadata_dict = {
+ os.path.normpath(file_name).replace("\\", "/"): sample_metadata
+ for file_name, sample_metadata in zip(
+ pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
+ )
+ }
+ else:
+ raise ValueError(
+                                        f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
+ )
+ if metadata_dir is not None and downloaded_metadata_file is not None:
+ downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
+ downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
+ if downloaded_dir_file_relpath not in metadata_dict:
+ raise ValueError(
+ f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
+ )
+ sample_metadata = metadata_dict[downloaded_dir_file_relpath]
+ else:
+ raise ValueError(
+                                    f"One or several metadata{metadata_ext} files were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
+ )
+ else:
+ sample_metadata = {}
+ if add_labels:
+ sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
+ else:
+ sample_label = {}
+ yield (
+ file_idx,
+ {
+ **sample_empty_metadata,
+ self.BASE_COLUMN_NAME: downloaded_dir_file,
+ **sample_metadata,
+ **sample_label,
+ },
+ )
+ file_idx += 1
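+
+
+# A minimal, illustrative sketch of how a child class is expected to override the abstract
+# class attributes documented above (the ImageFolder module defined further below is the real
+# example shipped with the library); the audio-flavored names and the extension list here are
+# assumptions for the example, not part of this module:
+# ```
+# import datasets
+# from datasets.tasks import AudioClassification
+#
+#
+# class MyAudioFolderConfig(FolderBasedBuilderConfig):
+#     """BuilderConfig for MyAudioFolder."""
+#
+#
+# class MyAudioFolder(FolderBasedBuilder):
+#     BASE_FEATURE = datasets.Audio
+#     BASE_COLUMN_NAME = "audio"
+#     BUILDER_CONFIG_CLASS = MyAudioFolderConfig
+#     EXTENSIONS = [".wav", ".mp3", ".flac"]
+#     CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
+# ```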
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cbd911495c2f6698e2b775a35341be322382808
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..010b82b2a8d71f16d0c739bb23b422c59ae68408
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/__pycache__/imagefolder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd2dd0d419a626dbb5149cb56abf69c82d35deb4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/imagefolder/imagefolder.py
@@ -0,0 +1,104 @@
+from typing import List
+
+import datasets
+from datasets.tasks import ImageClassification
+
+from ..folder_based_builder import folder_based_builder
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
+ """BuilderConfig for ImageFolder."""
+
+ drop_labels: bool = None
+ drop_metadata: bool = None
+
+
+class ImageFolder(folder_based_builder.FolderBasedBuilder):
+ BASE_FEATURE = datasets.Image
+ BASE_COLUMN_NAME = "image"
+ BUILDER_CONFIG_CLASS = ImageFolderConfig
+ EXTENSIONS: List[str] # definition at the bottom of the script
+ CLASSIFICATION_TASK = ImageClassification(image_column="image", label_column="label")
+
+
+# Obtained with:
+# ```
+# import PIL.Image
+# IMAGE_EXTENSIONS = []
+# PIL.Image.init()
+# for ext, format in PIL.Image.EXTENSION.items():
+# if format in PIL.Image.OPEN:
+#         IMAGE_EXTENSIONS.append(ext)
+# ```
+# We intentionally do not run this code at import time because:
+# (1) Pillow is an optional dependency, so importing Pillow in the global namespace is not allowed
+# (2) the list of supported extensions should stay deterministic
+IMAGE_EXTENSIONS = [
+ ".blp",
+ ".bmp",
+ ".dib",
+ ".bufr",
+ ".cur",
+ ".pcx",
+ ".dcx",
+ ".dds",
+ ".ps",
+ ".eps",
+ ".fit",
+ ".fits",
+ ".fli",
+ ".flc",
+ ".ftc",
+ ".ftu",
+ ".gbr",
+ ".gif",
+ ".grib",
+ ".h5",
+ ".hdf",
+ ".png",
+ ".apng",
+ ".jp2",
+ ".j2k",
+ ".jpc",
+ ".jpf",
+ ".jpx",
+ ".j2c",
+ ".icns",
+ ".ico",
+ ".im",
+ ".iim",
+ ".tif",
+ ".tiff",
+ ".jfif",
+ ".jpe",
+ ".jpg",
+ ".jpeg",
+ ".mpg",
+ ".mpeg",
+ ".msp",
+ ".pcd",
+ ".pxr",
+ ".pbm",
+ ".pgm",
+ ".ppm",
+ ".pnm",
+ ".psd",
+ ".bw",
+ ".rgb",
+ ".rgba",
+ ".sgi",
+ ".ras",
+ ".tga",
+ ".icb",
+ ".vda",
+ ".vst",
+ ".webp",
+ ".wmf",
+ ".emf",
+ ".xbm",
+ ".xpm",
+]
+ImageFolder.EXTENSIONS = IMAGE_EXTENSIONS
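+
+
+# A hedged usage sketch of the packaged "imagefolder" module; the directory layout and the
+# data_dir path are assumptions for illustration. With a layout such as
+#     path/to/images/train/cat/1.png
+#     path/to/images/train/dog/2.png
+# the class labels ("cat", "dog") are inferred from the sub-directory names unless
+# `drop_labels=True` is passed:
+# ```
+# from datasets import load_dataset
+#
+# ds = load_dataset("imagefolder", data_dir="path/to/images")
+# print(ds["train"].features)  # roughly: {'image': Image(...), 'label': ClassLabel(names=['cat', 'dog'])}
+# ```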
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79d65c95dd8032660bb49f47beee0518a9951e01
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..172dd738f0488befa23c05391d6b3ca69e2e5fa6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/pandas.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py
new file mode 100644
index 0000000000000000000000000000000000000000..c17f389945e0fa55959e220e0b892cd7b3e8925d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/pandas/pandas.py
@@ -0,0 +1,62 @@
+import itertools
+import warnings
+from dataclasses import dataclass
+from typing import Optional
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+from datasets.table import table_cast
+
+
+@dataclass
+class PandasConfig(datasets.BuilderConfig):
+ """BuilderConfig for Pandas."""
+
+ features: Optional[datasets.Features] = None
+
+
+class Pandas(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = PandasConfig
+
+ def _info(self):
+ warnings.warn(
+ "The Pandas builder is deprecated and will be removed in the next major version of datasets.",
+ FutureWarning,
+ )
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+        """We handle string, list and dicts in data_files."""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.config.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ for i, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ pa_table = pa.Table.from_pandas(pd.read_pickle(f))
+ yield i, self._cast_table(pa_table)
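+
+
+# A hedged usage sketch: the builder above reads pickled DataFrames with `pd.read_pickle`, so a
+# dataset can be loaded from a .pkl file; "data.pkl" is an assumed file name, and the builder
+# itself emits a FutureWarning because it is deprecated:
+# ```
+# import pandas as pd
+# from datasets import load_dataset
+#
+# pd.DataFrame({"text": ["foo", "bar"], "label": [0, 1]}).to_pickle("data.pkl")
+# ds = load_dataset("pandas", data_files="data.pkl")
+# print(ds["train"][0])  # {'text': 'foo', 'label': 0}
+# ```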
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..399a2609f7e7012d84c72fb3c2a2662a28d70c22
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
@@ -0,0 +1,100 @@
+import itertools
+from dataclasses import dataclass
+from typing import List, Optional
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+import datasets
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class ParquetConfig(datasets.BuilderConfig):
+ """BuilderConfig for Parquet."""
+
+ batch_size: Optional[int] = None
+ columns: Optional[List[str]] = None
+ features: Optional[datasets.Features] = None
+
+
+class Parquet(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = ParquetConfig
+
+ def _info(self):
+ if (
+ self.config.columns is not None
+ and self.config.features is not None
+ and set(self.config.columns) != set(self.config.features)
+ ):
+            raise ValueError(
+                f"The columns and features argument must contain the same columns, but got {self.config.columns} and {self.config.features}"
+            )
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+        """We handle string, list and dicts in data_files."""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
+ files = [dl_manager.iter_files(file) for file in files]
+ # Infer features if they are stored in the arrow schema
+ if self.info.features is None:
+ for file in itertools.chain.from_iterable(files):
+ with open(file, "rb") as f:
+ self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
+ break
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
+ self.info.features = datasets.Features(
+ {col: feat for col, feat in self.info.features.items() if col in self.config.columns}
+ )
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.info.features is not None:
+ # more expensive cast to support nested features with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.info.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ if self.config.features is not None and self.config.columns is not None:
+ if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
+ raise ValueError(
+ f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
+ )
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ with open(file, "rb") as f:
+ parquet_file = pq.ParquetFile(f)
+ if parquet_file.metadata.num_row_groups > 0:
+ batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
+ try:
+ for batch_idx, record_batch in enumerate(
+ parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
+ ):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
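+
+
+# A hedged usage sketch; "data.parquet" and the column subset are assumptions. `columns` and
+# `batch_size` map onto the ParquetConfig fields defined above:
+# ```
+# import pandas as pd
+# from datasets import load_dataset
+#
+# pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}).to_parquet("data.parquet")
+# ds = load_dataset("parquet", data_files="data.parquet", columns=["a"], batch_size=2)
+# print(ds["train"].column_names)  # ['a']
+# ```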
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a6f409e0ea83eaf9ff972321fab93786bc37fc8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8aa7220714acbc03f36094e160d7105ea942e5b
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
@@ -0,0 +1,118 @@
+import sys
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+import datasets.config
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import sqlalchemy
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class SqlConfig(datasets.BuilderConfig):
+ """BuilderConfig for SQL."""
+
+ sql: Union[str, "sqlalchemy.sql.Selectable"] = None
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
+ index_col: Optional[Union[str, List[str]]] = None
+ coerce_float: bool = True
+ params: Optional[Union[List, Tuple, Dict]] = None
+ parse_dates: Optional[Union[List, Dict]] = None
+ columns: Optional[List[str]] = None
+ chunksize: Optional[int] = 10_000
+ features: Optional[datasets.Features] = None
+
+ def __post_init__(self):
+ if self.sql is None:
+ raise ValueError("sql must be specified")
+ if self.con is None:
+ raise ValueError("con must be specified")
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[datasets.Features] = None,
+ ) -> str:
+ config_kwargs = config_kwargs.copy()
+        # We need to stringify the Selectable object to make its hash deterministic
+        # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
+ sql = config_kwargs["sql"]
+ if not isinstance(sql, str):
+ if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
+ import sqlalchemy
+
+ if isinstance(sql, sqlalchemy.sql.Selectable):
+ engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
+ sql_str = str(sql.compile(dialect=engine.dialect))
+ config_kwargs["sql"] = sql_str
+ else:
+ raise TypeError(
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+ )
+ else:
+ raise TypeError(
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+ )
+ con = config_kwargs["con"]
+ if not isinstance(con, str):
+ config_kwargs["con"] = id(con)
+ logger.info(
+ f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
+ )
+
+ return super().create_config_id(config_kwargs, custom_features=custom_features)
+
+ @property
+ def pd_read_sql_kwargs(self):
+ pd_read_sql_kwargs = {
+ "index_col": self.index_col,
+ "columns": self.columns,
+ "params": self.params,
+ "coerce_float": self.coerce_float,
+ "parse_dates": self.parse_dates,
+ }
+ return pd_read_sql_kwargs
+
+
+class Sql(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = SqlConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ schema = self.config.features.arrow_schema
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+ # cheaper cast
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
+ else:
+ # more expensive cast; allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, schema)
+ return pa_table
+
+ def _generate_tables(self):
+ chunksize = self.config.chunksize
+ sql_reader = pd.read_sql(
+ self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
+ )
+ sql_reader = [sql_reader] if chunksize is None else sql_reader
+ for chunk_idx, df in enumerate(sql_reader):
+ pa_table = pa.Table.from_pandas(df)
+ yield chunk_idx, self._cast_table(pa_table)
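+
+
+# A hedged usage sketch built on `Dataset.from_sql`, which uses this builder under the hood;
+# the sqlite database, the table name and the query are assumptions. Passing a URI string for
+# `con` (e.g. "sqlite:///example.db") would require SQLAlchemy but keeps the config hash
+# deterministic, as noted in `create_config_id` above:
+# ```
+# import sqlite3
+#
+# import pandas as pd
+# from datasets import Dataset
+#
+# con = sqlite3.connect("example.db")
+# pd.DataFrame({"text": ["foo", "bar"]}).to_sql("docs", con, index=False)
+#
+# ds = Dataset.from_sql("SELECT text FROM docs", con)
+# print(ds[0])  # {'text': 'foo'}
+# ```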
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..746f8593282086c08ef65a9e43764d5a7bd56491
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1bc8696abbe834c390470509b9735ef900f7ebb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/__pycache__/text.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..47e07a0e4b35c9fa2af53c1a6455ac61e00ddf29
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/text/text.py
@@ -0,0 +1,129 @@
+import itertools
+import warnings
+from dataclasses import InitVar, dataclass
+from io import StringIO
+from typing import Optional
+
+import pyarrow as pa
+
+import datasets
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class TextConfig(datasets.BuilderConfig):
+ """BuilderConfig for text files."""
+
+ features: Optional[datasets.Features] = None
+ encoding: str = "utf-8"
+ errors: InitVar[Optional[str]] = "deprecated"
+ encoding_errors: Optional[str] = None
+ chunksize: int = 10 << 20 # 10MB
+ keep_linebreaks: bool = False
+ sample_by: str = "line"
+
+ def __post_init__(self, errors):
+ if errors != "deprecated":
+ warnings.warn(
+ "'errors' was deprecated in favor of 'encoding_errors' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'encoding_errors={errors}' instead.",
+ FutureWarning,
+ )
+ self.encoding_errors = errors
+
+
+class Text(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = TextConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
+
+ If str or List[str], then the dataset returns only the 'train' split.
+ If dict, then keys should be from the `datasets.Split` enum.
+ """
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ schema = self.config.features.arrow_schema
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+ # cheaper cast
+ pa_table = pa_table.cast(schema)
+ else:
+ # more expensive cast; allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, schema)
+ return pa_table
+ else:
+ return pa_table.cast(pa.schema({"text": pa.string()}))
+
+ def _generate_tables(self, files):
+ pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ # open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
+ with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
+ if self.config.sample_by == "line":
+ batch_idx = 0
+ while True:
+ batch = f.read(self.config.chunksize)
+ if not batch:
+ break
+ batch += f.readline() # finish current line
+                        # StringIO.readlines, by default, splits only on "\n" (and keeps line breaks)
+ batch = StringIO(batch).readlines()
+ if not self.config.keep_linebreaks:
+ batch = [line.rstrip("\n") for line in batch]
+ pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ batch_idx += 1
+ elif self.config.sample_by == "paragraph":
+ batch_idx = 0
+ batch = ""
+ while True:
+ new_batch = f.read(self.config.chunksize)
+ if not new_batch:
+ break
+ batch += new_batch
+ batch += f.readline() # finish current line
+ batch = batch.split("\n\n")
+ pa_table = pa.Table.from_arrays(
+ [pa.array([example for example in batch[:-1] if example])], names=pa_table_names
+ )
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ batch_idx += 1
+ batch = batch[-1]
+ if batch:
+ pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ elif self.config.sample_by == "document":
+ text = f.read()
+ pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
+ yield file_idx, self._cast_table(pa_table)
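+
+
+# A hedged usage sketch of the `sample_by` options handled above; "docs.txt" is an assumed
+# file name:
+# ```
+# from datasets import load_dataset
+#
+# lines = load_dataset("text", data_files="docs.txt")                               # one example per line
+# paragraphs = load_dataset("text", data_files="docs.txt", sample_by="paragraph")   # split on blank lines
+# documents = load_dataset("text", data_files="docs.txt", sample_by="document")     # one example per file
+# ```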
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69975d800bf8d555b43ec0471c9fe3c18068341c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__init__.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a18a1e79349cfb32a743aeca4c3e9a809645a75
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__init__.py
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from ..utils.logging import get_logger
+from .audio_classification import AudioClassification
+from .automatic_speech_recognition import AutomaticSpeechRecognition
+from .base import TaskTemplate
+from .image_classification import ImageClassification
+from .language_modeling import LanguageModeling
+from .question_answering import QuestionAnsweringExtractive
+from .summarization import Summarization
+from .text_classification import TextClassification
+
+
+__all__ = [
+ "AutomaticSpeechRecognition",
+ "AudioClassification",
+ "ImageClassification",
+ "LanguageModeling",
+ "QuestionAnsweringExtractive",
+ "Summarization",
+ "TaskTemplate",
+ "TextClassification",
+]
+
+logger = get_logger(__name__)
+
+
+NAME2TEMPLATE = {
+ AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
+ AudioClassification.task: AudioClassification,
+ ImageClassification.task: ImageClassification,
+ LanguageModeling.task: LanguageModeling,
+ QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
+ Summarization.task: Summarization,
+ TextClassification.task: TextClassification,
+}
+
+
+def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
+ """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
+    task_name = task_template_dict.get("task")
+    template = NAME2TEMPLATE.get(task_name) if task_name is not None else None
+    if template is None:
+        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
+        return None
+    return template.from_dict(task_template_dict)
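+
+
+# A hedged usage sketch of `task_template_from_dict`; the dictionary mimics what is stored in
+# a dataset's metadata, and the column names are assumptions:
+# ```
+# template = task_template_from_dict(
+#     {"task": "text-classification", "text_column": "sentence", "label_column": "label"}
+# )
+# print(template.column_mapping)  # {'sentence': 'text', 'label': 'labels'}
+# ```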
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62ba3fc7d1cd18c01de965203d9b696606d723cf
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63ca7512c5dba6f8a788397a7b045b6612b5ca29
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99ef959bfd2f80712b4b2a875af37f7cb0aa2786
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6dac55862cf681026aa2aaa493940753d18d0d6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/audio_classification.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f9fe402f3814b4db0eb1832405adcfaef77503e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, ClassLabel, Features
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AudioClassification(TaskTemplate):
+ task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ audio_column: str = "audio"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.audio_column: "audio",
+ self.label_column: "labels",
+ }
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
new file mode 100644
index 0000000000000000000000000000000000000000..103a98a1bc9774de6b652bbc69b41501a419f0f8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
@@ -0,0 +1,30 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AutomaticSpeechRecognition(TaskTemplate):
+ task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
+ audio_column: str = "audio"
+ transcription_column: str = "transcription"
+
+ def align_with_features(self, features):
+ if self.audio_column not in features:
+ raise ValueError(f"Column {self.audio_column} is not present in features.")
+ if not isinstance(features[self.audio_column], Audio):
+ raise ValueError(f"Column {self.audio_column} is not an Audio type.")
+ task_template = copy.deepcopy(self)
+ input_schema = self.input_schema.copy()
+ input_schema["audio"] = features[self.audio_column]
+ task_template.__dict__["input_schema"] = input_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.audio_column: "audio", self.transcription_column: "transcription"}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/base.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a5337ffc0784a1ed12f4617a9a0ef6ba7253e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/base.py
@@ -0,0 +1,39 @@
+import abc
+import copy
+import dataclasses
+from dataclasses import dataclass
+from typing import ClassVar, Dict, Type, TypeVar
+
+from ..features import Features
+
+
+T = TypeVar("T", bound="TaskTemplate")
+
+
+@dataclass(frozen=True)
+class TaskTemplate(abc.ABC):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str
+ input_schema: ClassVar[Features]
+ label_schema: ClassVar[Features]
+
+ def align_with_features(self: T, features: Features) -> T:
+ """
+ Align features with the task template.
+ """
+ # No-op
+ return copy.deepcopy(self)
+
+ @property
+ def features(self) -> Features:
+ return Features(**self.input_schema, **self.label_schema)
+
+ @property
+ @abc.abstractmethod
+ def column_mapping(self) -> Dict[str, str]:
+ raise NotImplementedError
+
+ @classmethod
+ def from_dict(cls: Type[T], template_dict: dict) -> T:
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in template_dict.items() if k in field_names})
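+
+
+# A hedged sketch of a custom template implementing the abstract interface above; the
+# "paraphrase" task name and its columns are assumptions used only for illustration:
+# ```
+# from dataclasses import dataclass, field
+# from typing import ClassVar, Dict
+#
+# from ..features import Features, Value
+#
+#
+# @dataclass(frozen=True)
+# class Paraphrase(TaskTemplate):
+#     task: str = field(default="paraphrase", metadata={"include_in_asdict_even_if_is_default": True})
+#     input_schema: ClassVar[Features] = Features({"text": Value("string")})
+#     label_schema: ClassVar[Features] = Features({"paraphrase": Value("string")})
+#     text_column: str = "text"
+#     paraphrase_column: str = "paraphrase"
+#
+#     @property
+#     def column_mapping(self) -> Dict[str, str]:
+#         return {self.text_column: "text", self.paraphrase_column: "paraphrase"}
+# ```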
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/image_classification.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/image_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..20a19e0408a7ec8061ac4fac700d83e6dcbadcdf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/image_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Image
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class ImageClassification(TaskTemplate):
+ task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"image": Image()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ image_column: str = "image"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.image_column: "image",
+ self.label_column: "labels",
+ }
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/language_modeling.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2837744fa1718e57ffbeeca1a6e9a60c9468d8f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class LanguageModeling(TaskTemplate):
+ task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
+
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({})
+ text_column: str = "text"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text"}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/question_answering.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..349fd54141762631eec025681015cedd97c23b63
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/question_answering.py
@@ -0,0 +1,29 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Sequence, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class QuestionAnsweringExtractive(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
+ label_schema: ClassVar[Features] = Features(
+ {
+ "answers": Sequence(
+ {
+ "text": Value("string"),
+ "answer_start": Value("int32"),
+ }
+ )
+ }
+ )
+ question_column: str = "question"
+ context_column: str = "context"
+ answers_column: str = "answers"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/summarization.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0057b07b4f62947c1bfde1962bf06be1427c363
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/summarization.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class Summarization(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"summary": Value("string")})
+ text_column: str = "text"
+ summary_column: str = "summary"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text", self.summary_column: "summary"}
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/tasks/text_classification.py b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/text_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..13584b73e8ae668bd6c145b60598cd6859be5146
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/tasks/text_classification.py
@@ -0,0 +1,34 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class TextClassification(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ text_column: str = "text"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.text_column: "text",
+ self.label_column: "labels",
+ }
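+
+
+# A hedged usage sketch of `align_with_features`; the feature definition is an assumption:
+# ```
+# from ..features import ClassLabel, Features, Value
+#
+# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
+# template = TextClassification().align_with_features(features)
+# print(template.label_schema["labels"].names)  # ['neg', 'pos']
+# ```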
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_filelock.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_filelock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..402762ef2e54678462a99fd822a806cbf1a03b70
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/_filelock.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/beam_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/beam_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b594500c79c1bba2d0128f71c66b2c80d184cab
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/beam_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/download_manager.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..099236435709413b1914f167ad9d39dd0da9b798
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/download_manager.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81568e803a8418a29c994d40ca9c997a4c14d4df
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/info_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/info_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd0587939d8b44cef69e7074c9639584f8f690b3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/info_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/sharding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/sharding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba9ee0d421634d4c91b48a52d18969285c94f5d4
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/sharding.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..442ac56546588c0ddccea5640a945c00297ed781
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/extract.py b/llmeval-env/lib/python3.10/site-packages/datasets/utils/extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..39e3babf97b7563d39d8dbf59f2203b460c8ac21
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/extract.py
@@ -0,0 +1,351 @@
+import bz2
+import gzip
+import lzma
+import os
+import shutil
+import struct
+import tarfile
+import warnings
+import zipfile
+from abc import ABC, abstractmethod
+from pathlib import Path
+from typing import Dict, List, Optional, Type, Union
+
+from .. import config
+from ._filelock import FileLock
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class ExtractManager:
+ def __init__(self, cache_dir: Optional[str] = None):
+ self.extract_dir = (
+ os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
+ )
+ self.extractor = Extractor
+
+ def _get_output_path(self, path: str) -> str:
+ from .file_utils import hash_url_to_filename
+
+ # Path where we extract compressed archives
+ # We extract in the cache dir and name the extracted path by hashing the original path
+ abs_path = os.path.abspath(path)
+ return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
+
+ def _do_extract(self, output_path: str, force_extract: bool) -> bool:
+ return force_extract or (
+ not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
+ )
+
+ def extract(self, input_path: str, force_extract: bool = False) -> str:
+ extractor_format = self.extractor.infer_extractor_format(input_path)
+ if not extractor_format:
+ return input_path
+ output_path = self._get_output_path(input_path)
+ if self._do_extract(output_path, force_extract):
+ self.extractor.extract(input_path, output_path, extractor_format)
+ return output_path
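+
+# --- Usage sketch (illustrative; not part of the upstream module) ---
+# How calling code would typically drive ExtractManager; the archive path and
+# cache directory below are placeholders.
+#
+#     from datasets.utils.extract import ExtractManager
+#
+#     manager = ExtractManager(cache_dir="/tmp/datasets_cache")
+#     extracted_path = manager.extract("corpus.tar.gz")
+#     # A repeated call reuses the hashed output dir unless force_extract=True:
+#     extracted_path = manager.extract("corpus.tar.gz", force_extract=True)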
+
+
+class BaseExtractor(ABC):
+ @classmethod
+ @abstractmethod
+ def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ...
+
+ @staticmethod
+ @abstractmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ...
+
+
+class MagicNumberBaseExtractor(BaseExtractor, ABC):
+ magic_numbers: List[bytes] = []
+
+ @staticmethod
+ def read_magic_number(path: Union[Path, str], magic_number_length: int):
+ with open(path, "rb") as f:
+ return f.read(magic_number_length)
+
+ @classmethod
+ def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
+ if not magic_number:
+ magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
+ try:
+ magic_number = cls.read_magic_number(path, magic_number_length)
+ except OSError:
+ return False
+ return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
+
+
+class TarExtractor(BaseExtractor):
+ @classmethod
+ def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
+ return tarfile.is_tarfile(path)
+
+ @staticmethod
+ def safemembers(members, output_path):
+ """
+ Fix for CVE-2007-4559
+ Desc:
+ Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile
+ module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot)
+ sequence in filenames in a TAR archive, a related issue to CVE-2001-1267.
+ See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559
+ From: https://stackoverflow.com/a/10077309
+ """
+
+ def resolved(path: str) -> str:
+ return os.path.realpath(os.path.abspath(path))
+
+ def badpath(path: str, base: str) -> bool:
+ # joinpath will ignore base if path is absolute
+ return not resolved(os.path.join(base, path)).startswith(base)
+
+ def badlink(info, base: str) -> bool:
+ # Links are interpreted relative to the directory containing the link
+ tip = resolved(os.path.join(base, os.path.dirname(info.name)))
+ return badpath(info.linkname, base=tip)
+
+ base = resolved(output_path)
+
+ for finfo in members:
+ if badpath(finfo.name, base):
+ logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
+ elif finfo.issym() and badlink(finfo, base):
+ logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
+ elif finfo.islnk() and badlink(finfo, base):
+ logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
+ else:
+ yield finfo
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ os.makedirs(output_path, exist_ok=True)
+ tar_file = tarfile.open(input_path)
+ tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
+ tar_file.close()
+
+
+class GzipExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\x1f\x8b"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ with gzip.open(input_path, "rb") as gzip_file:
+ with open(output_path, "wb") as extracted_file:
+ shutil.copyfileobj(gzip_file, extracted_file)
+
+
+class ZipExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [
+ b"PK\x03\x04",
+ b"PK\x05\x06", # empty archive
+ b"PK\x07\x08", # spanned archive
+ ]
+
+ @classmethod
+ def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
+ if super().is_extractable(path, magic_number=magic_number):
+ return True
+ try:
+ # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
+ # From: https://github.com/python/cpython/pull/5053
+ from zipfile import (
+ _CD_SIGNATURE,
+ _ECD_DISK_NUMBER,
+ _ECD_DISK_START,
+ _ECD_ENTRIES_TOTAL,
+ _ECD_OFFSET,
+ _ECD_SIZE,
+ _EndRecData,
+ sizeCentralDir,
+ stringCentralDir,
+ structCentralDir,
+ )
+
+ with open(path, "rb") as fp:
+ endrec = _EndRecData(fp)
+ if endrec:
+ if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
+ return True # Empty zipfiles are still zipfiles
+ elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
+ fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk
+ if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
+ data = fp.read(sizeCentralDir) # CD is where we expect it to be
+ if len(data) == sizeCentralDir:
+ centdir = struct.unpack(structCentralDir, data) # CD is the right size
+ if centdir[_CD_SIGNATURE] == stringCentralDir:
+ return True # First central directory entry has correct magic number
+ return False
+ except Exception: # catch all errors in case future python versions change the zipfile internals
+ return False
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ os.makedirs(output_path, exist_ok=True)
+ with zipfile.ZipFile(input_path, "r") as zip_file:
+ zip_file.extractall(output_path)
+
+
+class XzExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ with lzma.open(input_path) as compressed_file:
+ with open(output_path, "wb") as extracted_file:
+ shutil.copyfileobj(compressed_file, extracted_file)
+
+
+class RarExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ if not config.RARFILE_AVAILABLE:
+ raise ImportError("Please pip install rarfile")
+ import rarfile
+
+ os.makedirs(output_path, exist_ok=True)
+ rf = rarfile.RarFile(input_path)
+ rf.extractall(output_path)
+ rf.close()
+
+
+class ZstdExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\x28\xb5\x2f\xfd"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ if not config.ZSTANDARD_AVAILABLE:
+ raise ImportError("Please pip install zstandard")
+ import zstandard as zstd
+
+ dctx = zstd.ZstdDecompressor()
+ with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
+ dctx.copy_stream(ifh, ofh)
+
+
+class Bzip2Extractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\x42\x5a\x68"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ with bz2.open(input_path, "rb") as compressed_file:
+ with open(output_path, "wb") as extracted_file:
+ shutil.copyfileobj(compressed_file, extracted_file)
+
+
+class SevenZipExtractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ if not config.PY7ZR_AVAILABLE:
+ raise ImportError("Please pip install py7zr")
+ import py7zr
+
+ os.makedirs(output_path, exist_ok=True)
+ with py7zr.SevenZipFile(input_path, "r") as archive:
+ archive.extractall(output_path)
+
+
+class Lz4Extractor(MagicNumberBaseExtractor):
+ magic_numbers = [b"\x04\x22\x4d\x18"]
+
+ @staticmethod
+ def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
+ if not config.LZ4_AVAILABLE:
+ raise ImportError("Please pip install lz4")
+ import lz4.frame
+
+ with lz4.frame.open(input_path, "rb") as compressed_file:
+ with open(output_path, "wb") as extracted_file:
+ shutil.copyfileobj(compressed_file, extracted_file)
+
+
+class Extractor:
+ # Keep zip after tar and gzip, because an archive can be wrongly detected as zip (or a zip wrongly detected as tar or gzip)
+ extractors: Dict[str, Type[BaseExtractor]] = {
+ "tar": TarExtractor,
+ "gzip": GzipExtractor,
+ "zip": ZipExtractor,
+ "xz": XzExtractor,
+ "rar": RarExtractor,
+ "zstd": ZstdExtractor,
+ "bz2": Bzip2Extractor,
+ "7z": SevenZipExtractor, #
+ "lz4": Lz4Extractor, #
+ }
+
+ @classmethod
+ def _get_magic_number_max_length(cls):
+ return max(
+ len(extractor_magic_number)
+ for extractor in cls.extractors.values()
+ if issubclass(extractor, MagicNumberBaseExtractor)
+ for extractor_magic_number in extractor.magic_numbers
+ )
+
+ @staticmethod
+ def _read_magic_number(path: Union[Path, str], magic_number_length: int):
+ try:
+ return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
+ except OSError:
+ return b""
+
+ @classmethod
+ def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
+ warnings.warn(
+ "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
+ "Use 'infer_extractor_format' instead.",
+ category=FutureWarning,
+ )
+ extractor_format = cls.infer_extractor_format(path)
+ if extractor_format:
+ return True if not return_extractor else (True, cls.extractors[extractor_format])
+ return False if not return_extractor else (False, None)
+
+ @classmethod
+ def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:
+ magic_number_max_length = cls._get_magic_number_max_length()
+ magic_number = cls._read_magic_number(path, magic_number_max_length)
+ for extractor_format, extractor in cls.extractors.items():
+ if extractor.is_extractable(path, magic_number=magic_number):
+ return extractor_format
+
+ @classmethod
+ def extract(
+ cls,
+ input_path: Union[Path, str],
+ output_path: Union[Path, str],
+ extractor_format: Optional[str] = None,
+ extractor: Optional[BaseExtractor] = "deprecated",
+ ) -> None:
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
+ # Prevent parallel extractions
+ lock_path = str(Path(output_path).with_suffix(".lock"))
+ with FileLock(lock_path):
+ shutil.rmtree(output_path, ignore_errors=True)
+ if extractor_format or extractor != "deprecated":
+ if extractor != "deprecated" or not isinstance(extractor_format, str): # passed as positional arg
+ warnings.warn(
+ "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
+ "Use 'extractor_format' instead.",
+ category=FutureWarning,
+ )
+ extractor = extractor if extractor != "deprecated" else extractor_format
+ else:
+ extractor = cls.extractors[extractor_format]
+ return extractor.extract(input_path, output_path)
+ else:
+ warnings.warn(
+ "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
+ "exception in 3.0.0.",
+ category=FutureWarning,
+ )
+ for extractor in cls.extractors.values():
+ if extractor.is_extractable(input_path):
+ return extractor.extract(input_path, output_path)
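+
+# --- Usage sketch (illustrative; not part of the upstream module) ---
+# Direct use of the Extractor registry from user code; "data.zip" and the
+# output directory are placeholders.
+#
+#     from datasets.utils.extract import Extractor
+#
+#     fmt = Extractor.infer_extractor_format("data.zip")  # e.g. "zip", or None if unrecognized
+#     if fmt:
+#         Extractor.extract("data.zip", "extracted/data", extractor_format=fmt)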
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce7634dc32d90a3676c245a305dbc79b8d5bff0d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/languages.json b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/languages.json
new file mode 100644
index 0000000000000000000000000000000000000000..ea7686f956b898af3faf97b86be89b71d88855d4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/languages.json
@@ -0,0 +1,8026 @@
+{
+ "code": "Programming language (C++, Java, Javascript, Python, etc.)",
+ "aa": "Afar",
+ "aaa": "Ghotuo",
+ "aab": "Alumu-Tesu",
+ "aac": "Ari",
+ "aad": "Amal",
+ "aae": "Arbëreshë Albanian",
+ "aaf": "Aranadan",
+ "aag": "Ambrak",
+ "aah": "Abu' Arapesh",
+ "aai": "Arifama-Miniafia",
+ "aak": "Ankave",
+ "aal": "Afade",
+ "aan": "Anambé",
+ "aao": "Algerian Saharan Arabic",
+ "aap": "Pará Arára",
+ "aaq": "Eastern Abnaki",
+ "aas": "Aasáx",
+ "aat": "Arvanitika Albanian",
+ "aau": "Abau",
+ "aav": "Austro-Asiatic languages",
+ "aaw": "Solong",
+ "aax": "Mandobo Atas",
+ "aaz": "Amarasi",
+ "ab": "Abkhazian",
+ "aba": "Abé",
+ "abb": "Bankon",
+ "abc": "Ambala Ayta",
+ "abd": "Manide",
+ "abe": "Western Abnaki",
+ "abf": "Abai Sungai",
+ "abg": "Abaga",
+ "abh": "Tajiki Arabic",
+ "abi": "Abidji",
+ "abj": "Aka-Bea",
+ "abl": "Lampung Nyo",
+ "abm": "Abanyom",
+ "abn": "Abua",
+ "abo": "Abon",
+ "abp": "Abellen Ayta",
+ "abq": "Abaza",
+ "abr": "Abron",
+ "abs": "Ambonese Malay",
+ "abt": "Ambulas",
+ "abu": "Abure",
+ "abv": "Baharna Arabic",
+ "abw": "Pal",
+ "abx": "Inabaknon",
+ "aby": "Aneme Wake",
+ "abz": "Abui",
+ "aca": "Achagua",
+ "acb": "Áncá",
+ "acd": "Gikyode",
+ "ace": "Achinese",
+ "acf": "Saint Lucian Creole French",
+ "ach": "Acoli",
+ "aci": "Aka-Cari",
+ "ack": "Aka-Kora",
+ "acl": "Akar-Bale",
+ "acm": "Mesopotamian Arabic",
+ "acn": "Achang",
+ "acp": "Eastern Acipa",
+ "acq": "Ta'izzi-Adeni Arabic",
+ "acr": "Achi",
+ "acs": "Acroá",
+ "act": "Achterhoeks",
+ "acu": "Achuar-Shiwiar",
+ "acv": "Achumawi",
+ "acw": "Hijazi Arabic",
+ "acx": "Omani Arabic",
+ "acy": "Cypriot Arabic",
+ "acz": "Acheron",
+ "ada": "Adangme",
+ "adb": "Atauran",
+ "add": "Lidzonka; Dzodinka",
+ "ade": "Adele",
+ "adf": "Dhofari Arabic",
+ "adg": "Andegerebinha",
+ "adh": "Adhola",
+ "adi": "Adi",
+ "adj": "Adioukrou",
+ "adl": "Galo",
+ "adn": "Adang",
+ "ado": "Abu",
+ "adq": "Adangbe",
+ "adr": "Adonara",
+ "ads": "Adamorobe Sign Language",
+ "adt": "Adnyamathanha",
+ "adu": "Aduge",
+ "adw": "Amundava",
+ "adx": "Amdo Tibetan",
+ "ady": "Adyghe; Adygei",
+ "adz": "Adzera",
+ "ae": "Avestan",
+ "aea": "Areba",
+ "aeb": "Tunisian Arabic",
+ "aec": "Saidi Arabic",
+ "aed": "Argentine Sign Language",
+ "aee": "Northeast Pashai; Northeast Pashayi",
+ "aek": "Haeke",
+ "ael": "Ambele",
+ "aem": "Arem",
+ "aen": "Armenian Sign Language",
+ "aeq": "Aer",
+ "aer": "Eastern Arrernte",
+ "aes": "Alsea",
+ "aeu": "Akeu",
+ "aew": "Ambakich",
+ "aey": "Amele",
+ "aez": "Aeka",
+ "af": "Afrikaans",
+ "afa": "Afro-Asiatic languages",
+ "afb": "Gulf Arabic",
+ "afd": "Andai",
+ "afe": "Putukwam",
+ "afg": "Afghan Sign Language",
+ "afh": "Afrihili",
+ "afi": "Akrukay; Chini",
+ "afk": "Nanubae",
+ "afn": "Defaka",
+ "afo": "Eloyi",
+ "afp": "Tapei",
+ "afs": "Afro-Seminole Creole",
+ "aft": "Afitti",
+ "afu": "Awutu",
+ "afz": "Obokuitai",
+ "aga": "Aguano",
+ "agb": "Legbo",
+ "agc": "Agatu",
+ "agd": "Agarabi",
+ "age": "Angal",
+ "agf": "Arguni",
+ "agg": "Angor",
+ "agh": "Ngelima",
+ "agi": "Agariya",
+ "agj": "Argobba",
+ "agk": "Isarog Agta",
+ "agl": "Fembe",
+ "agm": "Angaataha",
+ "agn": "Agutaynen",
+ "ago": "Tainae",
+ "agq": "Aghem",
+ "agr": "Aguaruna",
+ "ags": "Esimbi",
+ "agt": "Central Cagayan Agta",
+ "agu": "Aguacateco",
+ "agv": "Remontado Dumagat",
+ "agw": "Kahua",
+ "agx": "Aghul",
+ "agy": "Southern Alta",
+ "agz": "Mt. Iriga Agta",
+ "aha": "Ahanta",
+ "ahb": "Axamb",
+ "ahg": "Qimant",
+ "ahh": "Aghu",
+ "ahi": "Tiagbamrin Aizi",
+ "ahk": "Akha",
+ "ahl": "Igo",
+ "ahm": "Mobumrin Aizi",
+ "ahn": "Àhàn",
+ "aho": "Ahom",
+ "ahp": "Aproumu Aizi",
+ "ahr": "Ahirani",
+ "ahs": "Ashe",
+ "aht": "Ahtena",
+ "aia": "Arosi",
+ "aib": "Ainu (China)",
+ "aic": "Ainbai",
+ "aid": "Alngith",
+ "aie": "Amara",
+ "aif": "Agi",
+ "aig": "Antigua and Barbuda Creole English",
+ "aih": "Ai-Cham",
+ "aii": "Assyrian Neo-Aramaic",
+ "aij": "Lishanid Noshan",
+ "aik": "Ake",
+ "ail": "Aimele",
+ "aim": "Aimol",
+ "ain": "Ainu (Japan)",
+ "aio": "Aiton",
+ "aip": "Burumakok",
+ "aiq": "Aimaq",
+ "air": "Airoran",
+ "ait": "Arikem",
+ "aiw": "Aari",
+ "aix": "Aighon",
+ "aiy": "Ali",
+ "aja": "Aja (South Sudan)",
+ "ajg": "Aja (Benin)",
+ "aji": "Ajië",
+ "ajn": "Andajin",
+ "ajp": "South Levantine Arabic",
+ "ajs": "Algerian Jewish Sign Language",
+ "aju": "Judeo-Moroccan Arabic",
+ "ajw": "Ajawa",
+ "ajz": "Amri Karbi",
+ "ak": "Akan",
+ "akb": "Batak Angkola",
+ "akc": "Mpur",
+ "akd": "Ukpet-Ehom",
+ "ake": "Akawaio",
+ "akf": "Akpa",
+ "akg": "Anakalangu",
+ "akh": "Angal Heneng",
+ "aki": "Aiome",
+ "akj": "Aka-Jeru",
+ "akk": "Akkadian",
+ "akl": "Aklanon",
+ "akm": "Aka-Bo",
+ "ako": "Akurio",
+ "akp": "Siwu",
+ "akq": "Ak",
+ "akr": "Araki",
+ "aks": "Akaselem",
+ "akt": "Akolet",
+ "aku": "Akum",
+ "akv": "Akhvakh",
+ "akw": "Akwa",
+ "akx": "Aka-Kede",
+ "aky": "Aka-Kol",
+ "akz": "Alabama",
+ "ala": "Alago",
+ "alc": "Qawasqar",
+ "ald": "Alladian",
+ "ale": "Aleut",
+ "alf": "Alege",
+ "alg": "Algonquian languages",
+ "alh": "Alawa",
+ "ali": "Amaimon",
+ "alj": "Alangan",
+ "alk": "Alak",
+ "all": "Allar",
+ "alm": "Amblong",
+ "aln": "Gheg Albanian",
+ "alo": "Larike-Wakasihu",
+ "alp": "Alune",
+ "alq": "Algonquin",
+ "alr": "Alutor",
+ "als": "Tosk Albanian",
+ "alt": "Southern Altai",
+ "alu": "'Are'are",
+ "alv": "Atlantic-Congo languages",
+ "alw": "Alaba-K’abeena; Wanbasana",
+ "alx": "Amol",
+ "aly": "Alyawarr",
+ "alz": "Alur",
+ "am": "Amharic",
+ "ama": "Amanayé",
+ "amb": "Ambo",
+ "amc": "Amahuaca",
+ "ame": "Yanesha'",
+ "amf": "Hamer-Banna",
+ "amg": "Amurdak",
+ "ami": "Amis",
+ "amj": "Amdang",
+ "amk": "Ambai",
+ "aml": "War-Jaintia",
+ "amm": "Ama (Papua New Guinea)",
+ "amn": "Amanab",
+ "amo": "Amo",
+ "amp": "Alamblak",
+ "amq": "Amahai",
+ "amr": "Amarakaeri",
+ "ams": "Southern Amami-Oshima",
+ "amt": "Amto",
+ "amu": "Guerrero Amuzgo",
+ "amv": "Ambelau",
+ "amw": "Western Neo-Aramaic",
+ "amx": "Anmatyerre",
+ "amy": "Ami",
+ "amz": "Atampaya",
+ "an": "Aragonese",
+ "ana": "Andaqui",
+ "anb": "Andoa",
+ "anc": "Ngas",
+ "and": "Ansus",
+ "ane": "Xârâcùù",
+ "anf": "Animere",
+ "ang": "Old English (ca. 450-1100)",
+ "anh": "Nend",
+ "ani": "Andi",
+ "anj": "Anor",
+ "ank": "Goemai",
+ "anl": "Anu-Hkongso Chin",
+ "anm": "Anal",
+ "ann": "Obolo",
+ "ano": "Andoque",
+ "anp": "Angika",
+ "anq": "Jarawa (India)",
+ "anr": "Andh",
+ "ans": "Anserma",
+ "ant": "Antakarinya; Antikarinya",
+ "anu": "Anuak",
+ "anv": "Denya",
+ "anw": "Anaang",
+ "anx": "Andra-Hus",
+ "any": "Anyin",
+ "anz": "Anem",
+ "aoa": "Angolar",
+ "aob": "Abom",
+ "aoc": "Pemon",
+ "aod": "Andarum",
+ "aoe": "Angal Enen",
+ "aof": "Bragat",
+ "aog": "Angoram",
+ "aoi": "Anindilyakwa",
+ "aoj": "Mufian",
+ "aok": "Arhö",
+ "aol": "Alor",
+ "aom": "Ömie",
+ "aon": "Bumbita Arapesh",
+ "aor": "Aore",
+ "aos": "Taikat",
+ "aot": "Atong (India); A'tong",
+ "aou": "A'ou",
+ "aox": "Atorada",
+ "aoz": "Uab Meto",
+ "apa": "Apache languages",
+ "apb": "Sa'a",
+ "apc": "North Levantine Arabic",
+ "apd": "Sudanese Arabic",
+ "ape": "Bukiyip",
+ "apf": "Pahanan Agta",
+ "apg": "Ampanang",
+ "aph": "Athpariya",
+ "api": "Apiaká",
+ "apj": "Jicarilla Apache",
+ "apk": "Kiowa Apache",
+ "apl": "Lipan Apache",
+ "apm": "Mescalero-Chiricahua Apache",
+ "apn": "Apinayé",
+ "apo": "Ambul",
+ "app": "Apma",
+ "apq": "A-Pucikwar",
+ "apr": "Arop-Lokep",
+ "aps": "Arop-Sissano",
+ "apt": "Apatani",
+ "apu": "Apurinã",
+ "apv": "Alapmunte",
+ "apw": "Western Apache",
+ "apx": "Aputai",
+ "apy": "Apalaí",
+ "apz": "Safeyoka",
+ "aqa": "Alacalufan languages",
+ "aqc": "Archi",
+ "aqd": "Ampari Dogon",
+ "aqg": "Arigidi",
+ "aqk": "Aninka",
+ "aql": "Algic languages",
+ "aqm": "Atohwaim",
+ "aqn": "Northern Alta",
+ "aqp": "Atakapa",
+ "aqr": "Arhâ",
+ "aqt": "Angaité",
+ "aqz": "Akuntsu",
+ "ar": "Arabic",
+ "arb": "Standard Arabic",
+ "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)",
+ "ard": "Arabana",
+ "are": "Western Arrarnta",
+ "arh": "Arhuaco",
+ "ari": "Arikara",
+ "arj": "Arapaso",
+ "ark": "Arikapú",
+ "arl": "Arabela",
+ "arn": "Mapudungun; Mapuche",
+ "aro": "Araona",
+ "arp": "Arapaho",
+ "arq": "Algerian Arabic",
+ "arr": "Karo (Brazil)",
+ "ars": "Najdi Arabic",
+ "art": "Artificial languages",
+ "aru": "Aruá (Amazonas State); Arawá",
+ "arv": "Arbore",
+ "arw": "Arawak",
+ "arx": "Aruá (Rodonia State)",
+ "ary": "Moroccan Arabic",
+ "arz": "Egyptian Arabic",
+ "as": "Assamese",
+ "asa": "Asu (Tanzania)",
+ "asb": "Assiniboine",
+ "asc": "Casuarina Coast Asmat",
+ "ase": "American Sign Language",
+ "asf": "Auslan; Australian Sign Language",
+ "asg": "Cishingini",
+ "ash": "Abishira",
+ "asi": "Buruwai",
+ "asj": "Sari",
+ "ask": "Ashkun",
+ "asl": "Asilulu",
+ "asn": "Xingú Asuriní",
+ "aso": "Dano",
+ "asp": "Algerian Sign Language",
+ "asq": "Austrian Sign Language",
+ "asr": "Asuri",
+ "ass": "Ipulo",
+ "ast": "Asturian; Asturleonese; Bable; Leonese",
+ "asu": "Tocantins Asurini",
+ "asv": "Asoa",
+ "asw": "Australian Aborigines Sign Language",
+ "asx": "Muratayak",
+ "asy": "Yaosakor Asmat",
+ "asz": "As",
+ "ata": "Pele-Ata",
+ "atb": "Zaiwa",
+ "atc": "Atsahuaca",
+ "atd": "Ata Manobo",
+ "ate": "Atemble",
+ "atg": "Ivbie North-Okpela-Arhe",
+ "ath": "Athapascan languages",
+ "ati": "Attié",
+ "atj": "Atikamekw",
+ "atk": "Ati",
+ "atl": "Mt. Iraya Agta",
+ "atm": "Ata",
+ "atn": "Ashtiani",
+ "ato": "Atong (Cameroon)",
+ "atp": "Pudtol Atta",
+ "atq": "Aralle-Tabulahan",
+ "atr": "Waimiri-Atroari",
+ "ats": "Gros Ventre",
+ "att": "Pamplona Atta",
+ "atu": "Reel",
+ "atv": "Northern Altai",
+ "atw": "Atsugewi",
+ "atx": "Arutani",
+ "aty": "Aneityum",
+ "atz": "Arta",
+ "aua": "Asumboa",
+ "aub": "Alugu",
+ "auc": "Waorani",
+ "aud": "Anuta",
+ "auf": "Arauan languages",
+ "aug": "Aguna",
+ "auh": "Aushi",
+ "aui": "Anuki",
+ "auj": "Awjilah",
+ "auk": "Heyo",
+ "aul": "Aulua",
+ "aum": "Asu (Nigeria)",
+ "aun": "Molmo One",
+ "auo": "Auyokawa",
+ "aup": "Makayam",
+ "auq": "Anus; Korur",
+ "aur": "Aruek",
+ "aus": "Australian languages",
+ "aut": "Austral",
+ "auu": "Auye",
+ "auw": "Awyi",
+ "aux": "Aurá",
+ "auy": "Awiyaana",
+ "auz": "Uzbeki Arabic",
+ "av": "Avaric",
+ "avb": "Avau",
+ "avd": "Alviri-Vidari",
+ "avi": "Avikam",
+ "avk": "Kotava",
+ "avl": "Eastern Egyptian Bedawi Arabic",
+ "avm": "Angkamuthi",
+ "avn": "Avatime",
+ "avo": "Agavotaguerra",
+ "avs": "Aushiri",
+ "avt": "Au",
+ "avu": "Avokaya",
+ "avv": "Avá-Canoeiro",
+ "awa": "Awadhi",
+ "awb": "Awa (Papua New Guinea)",
+ "awc": "Cicipu",
+ "awd": "Arawakan languages",
+ "awe": "Awetí",
+ "awg": "Anguthimri",
+ "awh": "Awbono",
+ "awi": "Aekyom",
+ "awk": "Awabakal",
+ "awm": "Arawum",
+ "awn": "Awngi",
+ "awo": "Awak",
+ "awr": "Awera",
+ "aws": "South Awyu",
+ "awt": "Araweté",
+ "awu": "Central Awyu",
+ "awv": "Jair Awyu",
+ "aww": "Awun",
+ "awx": "Awara",
+ "awy": "Edera Awyu",
+ "axb": "Abipon",
+ "axe": "Ayerrerenge",
+ "axg": "Mato Grosso Arára",
+ "axk": "Yaka (Central African Republic)",
+ "axl": "Lower Southern Aranda",
+ "axm": "Middle Armenian",
+ "axx": "Xârâgurè",
+ "ay": "Aymara",
+ "aya": "Awar",
+ "ayb": "Ayizo Gbe",
+ "ayc": "Southern Aymara",
+ "ayd": "Ayabadhu",
+ "aye": "Ayere",
+ "ayg": "Ginyanga",
+ "ayh": "Hadrami Arabic",
+ "ayi": "Leyigha",
+ "ayk": "Akuku",
+ "ayl": "Libyan Arabic",
+ "ayn": "Sanaani Arabic",
+ "ayo": "Ayoreo",
+ "ayp": "North Mesopotamian Arabic",
+ "ayq": "Ayi (Papua New Guinea)",
+ "ayr": "Central Aymara",
+ "ays": "Sorsogon Ayta",
+ "ayt": "Magbukun Ayta",
+ "ayu": "Ayu",
+ "ayz": "Mai Brat",
+ "az": "Azerbaijani",
+ "aza": "Azha",
+ "azb": "South Azerbaijani",
+ "azc": "Uto-Aztecan languages",
+ "azd": "Eastern Durango Nahuatl",
+ "azg": "San Pedro Amuzgos Amuzgo",
+ "azj": "North Azerbaijani",
+ "azm": "Ipalapa Amuzgo",
+ "azn": "Western Durango Nahuatl",
+ "azo": "Awing",
+ "azt": "Faire Atta",
+ "azz": "Highland Puebla Nahuatl",
+ "ba": "Bashkir",
+ "baa": "Babatana",
+ "bab": "Bainouk-Gunyuño",
+ "bac": "Badui",
+ "bad": "Banda languages",
+ "bae": "Baré",
+ "baf": "Nubaca",
+ "bag": "Tuki",
+ "bah": "Bahamas Creole English",
+ "bai": "Bamileke languages",
+ "baj": "Barakai",
+ "bal": "Baluchi",
+ "ban": "Balinese",
+ "bao": "Waimaha",
+ "bap": "Bantawa",
+ "bar": "Bavarian",
+ "bas": "Basa (Cameroon)",
+ "bat": "Baltic languages",
+ "bau": "Bada (Nigeria)",
+ "bav": "Vengo",
+ "baw": "Bambili-Bambui",
+ "bax": "Bamun",
+ "bay": "Batuley",
+ "bba": "Baatonum",
+ "bbb": "Barai",
+ "bbc": "Batak Toba",
+ "bbd": "Bau",
+ "bbe": "Bangba",
+ "bbf": "Baibai",
+ "bbg": "Barama",
+ "bbh": "Bugan",
+ "bbi": "Barombi",
+ "bbj": "Ghomálá'",
+ "bbk": "Babanki",
+ "bbl": "Bats",
+ "bbm": "Babango",
+ "bbn": "Uneapa",
+ "bbo": "Northern Bobo Madaré; Konabéré",
+ "bbp": "West Central Banda",
+ "bbq": "Bamali",
+ "bbr": "Girawa",
+ "bbs": "Bakpinka",
+ "bbt": "Mburku",
+ "bbu": "Kulung (Nigeria)",
+ "bbv": "Karnai",
+ "bbw": "Baba",
+ "bbx": "Bubia",
+ "bby": "Befang",
+ "bca": "Central Bai",
+ "bcb": "Bainouk-Samik",
+ "bcc": "Southern Balochi",
+ "bcd": "North Babar",
+ "bce": "Bamenyam",
+ "bcf": "Bamu",
+ "bcg": "Baga Pokur",
+ "bch": "Bariai",
+ "bci": "Baoulé",
+ "bcj": "Bardi",
+ "bck": "Bunuba",
+ "bcl": "Central Bikol",
+ "bcm": "Bannoni",
+ "bcn": "Bali (Nigeria)",
+ "bco": "Kaluli",
+ "bcp": "Bali (Democratic Republic of Congo)",
+ "bcq": "Bench",
+ "bcr": "Babine",
+ "bcs": "Kohumono",
+ "bct": "Bendi",
+ "bcu": "Awad Bing",
+ "bcv": "Shoo-Minda-Nye",
+ "bcw": "Bana",
+ "bcy": "Bacama",
+ "bcz": "Bainouk-Gunyaamolo",
+ "bda": "Bayot",
+ "bdb": "Basap",
+ "bdc": "Emberá-Baudó",
+ "bdd": "Bunama",
+ "bde": "Bade",
+ "bdf": "Biage",
+ "bdg": "Bonggi",
+ "bdh": "Baka (South Sudan)",
+ "bdi": "Burun",
+ "bdj": "Bai (South Sudan); Bai",
+ "bdk": "Budukh",
+ "bdl": "Indonesian Bajau",
+ "bdm": "Buduma",
+ "bdn": "Baldemu",
+ "bdo": "Morom",
+ "bdp": "Bende",
+ "bdq": "Bahnar",
+ "bdr": "West Coast Bajau",
+ "bds": "Burunge",
+ "bdt": "Bokoto",
+ "bdu": "Oroko",
+ "bdv": "Bodo Parja",
+ "bdw": "Baham",
+ "bdx": "Budong-Budong",
+ "bdy": "Bandjalang",
+ "bdz": "Badeshi",
+ "be": "Belarusian",
+ "bea": "Beaver",
+ "beb": "Bebele",
+ "bec": "Iceve-Maci",
+ "bed": "Bedoanas",
+ "bee": "Byangsi",
+ "bef": "Benabena",
+ "beg": "Belait",
+ "beh": "Biali",
+ "bei": "Bekati'",
+ "bej": "Beja; Bedawiyet",
+ "bek": "Bebeli",
+ "bem": "Bemba (Zambia)",
+ "beo": "Beami",
+ "bep": "Besoa",
+ "beq": "Beembe",
+ "ber": "Berber languages",
+ "bes": "Besme",
+ "bet": "Guiberoua Béte",
+ "beu": "Blagar",
+ "bev": "Daloa Bété",
+ "bew": "Betawi",
+ "bex": "Jur Modo",
+ "bey": "Beli (Papua New Guinea)",
+ "bez": "Bena (Tanzania)",
+ "bfa": "Bari",
+ "bfb": "Pauri Bareli",
+ "bfc": "Panyi Bai; Northern Bai",
+ "bfd": "Bafut",
+ "bfe": "Betaf; Tena",
+ "bff": "Bofi",
+ "bfg": "Busang Kayan",
+ "bfh": "Blafe",
+ "bfi": "British Sign Language",
+ "bfj": "Bafanji",
+ "bfk": "Ban Khor Sign Language",
+ "bfl": "Banda-Ndélé",
+ "bfm": "Mmen",
+ "bfn": "Bunak",
+ "bfo": "Malba Birifor",
+ "bfp": "Beba",
+ "bfq": "Badaga",
+ "bfr": "Bazigar",
+ "bfs": "Southern Bai",
+ "bft": "Balti",
+ "bfu": "Gahri",
+ "bfw": "Bondo",
+ "bfx": "Bantayanon",
+ "bfy": "Bagheli",
+ "bfz": "Mahasu Pahari",
+ "bg": "Bulgarian",
+ "bga": "Gwamhi-Wuri",
+ "bgb": "Bobongko",
+ "bgc": "Haryanvi",
+ "bgd": "Rathwi Bareli",
+ "bge": "Bauria",
+ "bgf": "Bangandu",
+ "bgg": "Bugun",
+ "bgi": "Giangan",
+ "bgj": "Bangolan",
+ "bgk": "Bit; Buxinhua",
+ "bgl": "Bo (Laos)",
+ "bgn": "Western Balochi",
+ "bgo": "Baga Koga",
+ "bgp": "Eastern Balochi",
+ "bgq": "Bagri",
+ "bgr": "Bawm Chin",
+ "bgs": "Tagabawa",
+ "bgt": "Bughotu",
+ "bgu": "Mbongno",
+ "bgv": "Warkay-Bipim",
+ "bgw": "Bhatri",
+ "bgx": "Balkan Gagauz Turkish",
+ "bgy": "Benggoi",
+ "bgz": "Banggai",
+ "bh": "Bihari languages",
+ "bha": "Bharia",
+ "bhb": "Bhili",
+ "bhc": "Biga",
+ "bhd": "Bhadrawahi",
+ "bhe": "Bhaya",
+ "bhf": "Odiai",
+ "bhg": "Binandere",
+ "bhh": "Bukharic",
+ "bhi": "Bhilali",
+ "bhj": "Bahing",
+ "bhl": "Bimin",
+ "bhm": "Bathari",
+ "bhn": "Bohtan Neo-Aramaic",
+ "bho": "Bhojpuri",
+ "bhp": "Bima",
+ "bhq": "Tukang Besi South",
+ "bhr": "Bara Malagasy",
+ "bhs": "Buwal",
+ "bht": "Bhattiyali",
+ "bhu": "Bhunjia",
+ "bhv": "Bahau",
+ "bhw": "Biak",
+ "bhx": "Bhalay",
+ "bhy": "Bhele",
+ "bhz": "Bada (Indonesia)",
+ "bi": "Bislama",
+ "bia": "Badimaya",
+ "bib": "Bissa; Bisa",
+ "bid": "Bidiyo",
+ "bie": "Bepour",
+ "bif": "Biafada",
+ "big": "Biangai",
+ "bik": "Bikol",
+ "bil": "Bile",
+ "bim": "Bimoba",
+ "bin": "Bini; Edo",
+ "bio": "Nai",
+ "bip": "Bila",
+ "biq": "Bipi",
+ "bir": "Bisorio",
+ "bit": "Berinomo",
+ "biu": "Biete",
+ "biv": "Southern Birifor",
+ "biw": "Kol (Cameroon)",
+ "bix": "Bijori",
+ "biy": "Birhor",
+ "biz": "Baloi",
+ "bja": "Budza",
+ "bjb": "Banggarla",
+ "bjc": "Bariji",
+ "bje": "Biao-Jiao Mien",
+ "bjf": "Barzani Jewish Neo-Aramaic",
+ "bjg": "Bidyogo",
+ "bjh": "Bahinemo",
+ "bji": "Burji",
+ "bjj": "Kanauji",
+ "bjk": "Barok",
+ "bjl": "Bulu (Papua New Guinea)",
+ "bjm": "Bajelani",
+ "bjn": "Banjar",
+ "bjo": "Mid-Southern Banda",
+ "bjp": "Fanamaket",
+ "bjr": "Binumarien",
+ "bjs": "Bajan",
+ "bjt": "Balanta-Ganja",
+ "bju": "Busuu",
+ "bjv": "Bedjond",
+ "bjw": "Bakwé",
+ "bjx": "Banao Itneg",
+ "bjy": "Bayali",
+ "bjz": "Baruga",
+ "bka": "Kyak",
+ "bkc": "Baka (Cameroon)",
+ "bkd": "Binukid; Talaandig",
+ "bkf": "Beeke",
+ "bkg": "Buraka",
+ "bkh": "Bakoko",
+ "bki": "Baki",
+ "bkj": "Pande",
+ "bkk": "Brokskat",
+ "bkl": "Berik",
+ "bkm": "Kom (Cameroon)",
+ "bkn": "Bukitan",
+ "bko": "Kwa'",
+ "bkp": "Boko (Democratic Republic of Congo)",
+ "bkq": "Bakairí",
+ "bkr": "Bakumpai",
+ "bks": "Northern Sorsoganon",
+ "bkt": "Boloki",
+ "bku": "Buhid",
+ "bkv": "Bekwarra",
+ "bkw": "Bekwel",
+ "bkx": "Baikeno",
+ "bky": "Bokyi",
+ "bkz": "Bungku",
+ "bla": "Siksika",
+ "blb": "Bilua",
+ "blc": "Bella Coola",
+ "bld": "Bolango",
+ "ble": "Balanta-Kentohe",
+ "blf": "Buol",
+ "blh": "Kuwaa",
+ "bli": "Bolia",
+ "blj": "Bolongan",
+ "blk": "Pa'o Karen; Pa'O",
+ "bll": "Biloxi",
+ "blm": "Beli (South Sudan)",
+ "bln": "Southern Catanduanes Bikol",
+ "blo": "Anii",
+ "blp": "Blablanga",
+ "blq": "Baluan-Pam",
+ "blr": "Blang",
+ "bls": "Balaesang",
+ "blt": "Tai Dam",
+ "blv": "Kibala; Bolo",
+ "blw": "Balangao",
+ "blx": "Mag-Indi Ayta",
+ "bly": "Notre",
+ "blz": "Balantak",
+ "bm": "Bambara",
+ "bma": "Lame",
+ "bmb": "Bembe",
+ "bmc": "Biem",
+ "bmd": "Baga Manduri",
+ "bme": "Limassa",
+ "bmf": "Bom-Kim",
+ "bmg": "Bamwe",
+ "bmh": "Kein",
+ "bmi": "Bagirmi",
+ "bmj": "Bote-Majhi",
+ "bmk": "Ghayavi",
+ "bml": "Bomboli",
+ "bmm": "Northern Betsimisaraka Malagasy",
+ "bmn": "Bina (Papua New Guinea)",
+ "bmo": "Bambalang",
+ "bmp": "Bulgebi",
+ "bmq": "Bomu",
+ "bmr": "Muinane",
+ "bms": "Bilma Kanuri",
+ "bmt": "Biao Mon",
+ "bmu": "Somba-Siawari",
+ "bmv": "Bum",
+ "bmw": "Bomwali",
+ "bmx": "Baimak",
+ "bmz": "Baramu",
+ "bn": "Bengali; Bangla",
+ "bna": "Bonerate",
+ "bnb": "Bookan",
+ "bnc": "Bontok",
+ "bnd": "Banda (Indonesia)",
+ "bne": "Bintauna",
+ "bnf": "Masiwang",
+ "bng": "Benga",
+ "bni": "Bangi",
+ "bnj": "Eastern Tawbuid",
+ "bnk": "Bierebo",
+ "bnl": "Boon",
+ "bnm": "Batanga",
+ "bnn": "Bunun",
+ "bno": "Bantoanon",
+ "bnp": "Bola",
+ "bnq": "Bantik",
+ "bnr": "Butmas-Tur",
+ "bns": "Bundeli",
+ "bnt": "Bantu languages",
+ "bnu": "Bentong",
+ "bnv": "Bonerif; Beneraf; Edwas",
+ "bnw": "Bisis",
+ "bnx": "Bangubangu",
+ "bny": "Bintulu",
+ "bnz": "Beezen",
+ "bo": "Tibetan",
+ "boa": "Bora",
+ "bob": "Aweer",
+ "boe": "Mundabli",
+ "bof": "Bolon",
+ "bog": "Bamako Sign Language",
+ "boh": "Boma",
+ "boi": "Barbareño",
+ "boj": "Anjam",
+ "bok": "Bonjo",
+ "bol": "Bole",
+ "bom": "Berom",
+ "bon": "Bine",
+ "boo": "Tiemacèwè Bozo",
+ "bop": "Bonkiman",
+ "boq": "Bogaya",
+ "bor": "Borôro",
+ "bot": "Bongo",
+ "bou": "Bondei",
+ "bov": "Tuwuli",
+ "bow": "Rema",
+ "box": "Buamu",
+ "boy": "Bodo (Central African Republic)",
+ "boz": "Tiéyaxo Bozo",
+ "bpa": "Daakaka",
+ "bpc": "Mbuk",
+ "bpd": "Banda-Banda",
+ "bpe": "Bauni",
+ "bpg": "Bonggo",
+ "bph": "Botlikh",
+ "bpi": "Bagupi",
+ "bpj": "Binji",
+ "bpk": "Orowe; 'Ôrôê",
+ "bpl": "Broome Pearling Lugger Pidgin",
+ "bpm": "Biyom",
+ "bpn": "Dzao Min",
+ "bpo": "Anasi",
+ "bpp": "Kaure",
+ "bpq": "Banda Malay",
+ "bpr": "Koronadal Blaan",
+ "bps": "Sarangani Blaan",
+ "bpt": "Barrow Point",
+ "bpu": "Bongu",
+ "bpv": "Bian Marind",
+ "bpw": "Bo (Papua New Guinea)",
+ "bpx": "Palya Bareli",
+ "bpy": "Bishnupriya",
+ "bpz": "Bilba",
+ "bqa": "Tchumbuli",
+ "bqb": "Bagusa",
+ "bqc": "Boko (Benin); Boo",
+ "bqd": "Bung",
+ "bqf": "Baga Kaloum",
+ "bqg": "Bago-Kusuntu",
+ "bqh": "Baima",
+ "bqi": "Bakhtiari",
+ "bqj": "Bandial",
+ "bqk": "Banda-Mbrès",
+ "bql": "Bilakura",
+ "bqm": "Wumboko",
+ "bqn": "Bulgarian Sign Language",
+ "bqo": "Balo",
+ "bqp": "Busa",
+ "bqq": "Biritai",
+ "bqr": "Burusu",
+ "bqs": "Bosngun",
+ "bqt": "Bamukumbit",
+ "bqu": "Boguru",
+ "bqv": "Koro Wachi; Begbere-Ejar",
+ "bqw": "Buru (Nigeria)",
+ "bqx": "Baangi",
+ "bqy": "Bengkala Sign Language",
+ "bqz": "Bakaka",
+ "br": "Breton",
+ "bra": "Braj",
+ "brb": "Brao; Lave",
+ "brc": "Berbice Creole Dutch",
+ "brd": "Baraamu",
+ "brf": "Bira",
+ "brg": "Baure",
+ "brh": "Brahui",
+ "bri": "Mokpwe",
+ "brj": "Bieria",
+ "brk": "Birked",
+ "brl": "Birwa",
+ "brm": "Barambu",
+ "brn": "Boruca",
+ "bro": "Brokkat",
+ "brp": "Barapasi",
+ "brq": "Breri",
+ "brr": "Birao",
+ "brs": "Baras",
+ "brt": "Bitare",
+ "bru": "Eastern Bru",
+ "brv": "Western Bru",
+ "brw": "Bellari",
+ "brx": "Bodo (India)",
+ "bry": "Burui",
+ "brz": "Bilbil",
+ "bs": "Bosnian",
+ "bsa": "Abinomn",
+ "bsb": "Brunei Bisaya",
+ "bsc": "Bassari; Oniyan",
+ "bse": "Wushi",
+ "bsf": "Bauchi",
+ "bsg": "Bashkardi",
+ "bsh": "Kati",
+ "bsi": "Bassossi",
+ "bsj": "Bangwinji",
+ "bsk": "Burushaski",
+ "bsl": "Basa-Gumna",
+ "bsm": "Busami",
+ "bsn": "Barasana-Eduria",
+ "bso": "Buso",
+ "bsp": "Baga Sitemu",
+ "bsq": "Bassa",
+ "bsr": "Bassa-Kontagora",
+ "bss": "Akoose",
+ "bst": "Basketo",
+ "bsu": "Bahonsuai",
+ "bsv": "Baga Sobané",
+ "bsw": "Baiso",
+ "bsx": "Yangkam",
+ "bsy": "Sabah Bisaya",
+ "bta": "Bata",
+ "btc": "Bati (Cameroon)",
+ "btd": "Batak Dairi",
+ "bte": "Gamo-Ningi",
+ "btf": "Birgit",
+ "btg": "Gagnoa Bété",
+ "bth": "Biatah Bidayuh",
+ "bti": "Burate",
+ "btj": "Bacanese Malay",
+ "btk": "Batak languages",
+ "btm": "Batak Mandailing",
+ "btn": "Ratagnon",
+ "bto": "Rinconada Bikol",
+ "btp": "Budibud",
+ "btq": "Batek",
+ "btr": "Baetora",
+ "bts": "Batak Simalungun",
+ "btt": "Bete-Bendi",
+ "btu": "Batu",
+ "btv": "Bateri",
+ "btw": "Butuanon",
+ "btx": "Batak Karo",
+ "bty": "Bobot",
+ "btz": "Batak Alas-Kluet",
+ "bua": "Buriat",
+ "bub": "Bua",
+ "buc": "Bushi",
+ "bud": "Ntcham",
+ "bue": "Beothuk",
+ "buf": "Bushoong",
+ "bug": "Buginese",
+ "buh": "Younuo Bunu",
+ "bui": "Bongili",
+ "buj": "Basa-Gurmana",
+ "buk": "Bugawac",
+ "bum": "Bulu (Cameroon)",
+ "bun": "Sherbro",
+ "buo": "Terei",
+ "bup": "Busoa",
+ "buq": "Brem",
+ "bus": "Bokobaru",
+ "but": "Bungain",
+ "buu": "Budu",
+ "buv": "Bun",
+ "buw": "Bubi",
+ "bux": "Boghom",
+ "buy": "Bullom So",
+ "buz": "Bukwen",
+ "bva": "Barein",
+ "bvb": "Bube",
+ "bvc": "Baelelea",
+ "bvd": "Baeggu",
+ "bve": "Berau Malay",
+ "bvf": "Boor",
+ "bvg": "Bonkeng",
+ "bvh": "Bure",
+ "bvi": "Belanda Viri",
+ "bvj": "Baan",
+ "bvk": "Bukat",
+ "bvl": "Bolivian Sign Language",
+ "bvm": "Bamunka",
+ "bvn": "Buna",
+ "bvo": "Bolgo",
+ "bvp": "Bumang",
+ "bvq": "Birri",
+ "bvr": "Burarra",
+ "bvt": "Bati (Indonesia)",
+ "bvu": "Bukit Malay",
+ "bvv": "Baniva",
+ "bvw": "Boga",
+ "bvx": "Dibole",
+ "bvy": "Baybayanon",
+ "bvz": "Bauzi",
+ "bwa": "Bwatoo",
+ "bwb": "Namosi-Naitasiri-Serua",
+ "bwc": "Bwile",
+ "bwd": "Bwaidoka",
+ "bwe": "Bwe Karen",
+ "bwf": "Boselewa",
+ "bwg": "Barwe",
+ "bwh": "Bishuo",
+ "bwi": "Baniwa",
+ "bwj": "Láá Láá Bwamu",
+ "bwk": "Bauwaki",
+ "bwl": "Bwela",
+ "bwm": "Biwat",
+ "bwn": "Wunai Bunu",
+ "bwo": "Boro (Ethiopia); Borna (Ethiopia)",
+ "bwp": "Mandobo Bawah",
+ "bwq": "Southern Bobo Madaré",
+ "bwr": "Bura-Pabir",
+ "bws": "Bomboma",
+ "bwt": "Bafaw-Balong",
+ "bwu": "Buli (Ghana)",
+ "bww": "Bwa",
+ "bwx": "Bu-Nao Bunu",
+ "bwy": "Cwi Bwamu",
+ "bwz": "Bwisi",
+ "bxa": "Tairaha",
+ "bxb": "Belanda Bor",
+ "bxc": "Molengue",
+ "bxd": "Pela",
+ "bxe": "Birale",
+ "bxf": "Bilur; Minigir",
+ "bxg": "Bangala",
+ "bxh": "Buhutu",
+ "bxi": "Pirlatapa",
+ "bxj": "Bayungu",
+ "bxk": "Bukusu; Lubukusu",
+ "bxl": "Jalkunan",
+ "bxm": "Mongolia Buriat",
+ "bxn": "Burduna",
+ "bxo": "Barikanchi",
+ "bxp": "Bebil",
+ "bxq": "Beele",
+ "bxr": "Russia Buriat",
+ "bxs": "Busam",
+ "bxu": "China Buriat",
+ "bxv": "Berakou",
+ "bxw": "Bankagooma",
+ "bxz": "Binahari",
+ "bya": "Batak",
+ "byb": "Bikya",
+ "byc": "Ubaghara",
+ "byd": "Benyadu'",
+ "bye": "Pouye",
+ "byf": "Bete",
+ "byg": "Baygo",
+ "byh": "Bhujel",
+ "byi": "Buyu",
+ "byj": "Bina (Nigeria)",
+ "byk": "Biao",
+ "byl": "Bayono",
+ "bym": "Bidjara",
+ "byn": "Bilin; Blin",
+ "byo": "Biyo",
+ "byp": "Bumaji",
+ "byq": "Basay",
+ "byr": "Baruya; Yipma",
+ "bys": "Burak",
+ "byt": "Berti",
+ "byv": "Medumba",
+ "byw": "Belhariya",
+ "byx": "Qaqet",
+ "byz": "Banaro",
+ "bza": "Bandi",
+ "bzb": "Andio",
+ "bzc": "Southern Betsimisaraka Malagasy",
+ "bzd": "Bribri",
+ "bze": "Jenaama Bozo",
+ "bzf": "Boikin",
+ "bzg": "Babuza",
+ "bzh": "Mapos Buang",
+ "bzi": "Bisu",
+ "bzj": "Belize Kriol English",
+ "bzk": "Nicaragua Creole English",
+ "bzl": "Boano (Sulawesi)",
+ "bzm": "Bolondo",
+ "bzn": "Boano (Maluku)",
+ "bzo": "Bozaba",
+ "bzp": "Kemberano",
+ "bzq": "Buli (Indonesia)",
+ "bzr": "Biri",
+ "bzs": "Brazilian Sign Language",
+ "bzt": "Brithenig",
+ "bzu": "Burmeso",
+ "bzv": "Naami",
+ "bzw": "Basa (Nigeria)",
+ "bzx": "Kɛlɛngaxo Bozo",
+ "bzy": "Obanliku",
+ "bzz": "Evant",
+ "ca": "Catalan; Valencian",
+ "caa": "Chortí",
+ "cab": "Garifuna",
+ "cac": "Chuj",
+ "cad": "Caddo",
+ "cae": "Lehar; Laalaa",
+ "caf": "Southern Carrier",
+ "cag": "Nivaclé",
+ "cah": "Cahuarano",
+ "cai": "Central American Indian languages",
+ "caj": "Chané",
+ "cak": "Kaqchikel; Cakchiquel",
+ "cal": "Carolinian",
+ "cam": "Cemuhî",
+ "can": "Chambri",
+ "cao": "Chácobo",
+ "cap": "Chipaya",
+ "caq": "Car Nicobarese",
+ "car": "Galibi Carib",
+ "cas": "Tsimané",
+ "cau": "Caucasian languages",
+ "cav": "Cavineña",
+ "caw": "Callawalla",
+ "cax": "Chiquitano",
+ "cay": "Cayuga",
+ "caz": "Canichana",
+ "cba": "Chibchan languages",
+ "cbb": "Cabiyarí",
+ "cbc": "Carapana",
+ "cbd": "Carijona",
+ "cbg": "Chimila",
+ "cbi": "Chachi",
+ "cbj": "Ede Cabe",
+ "cbk": "Chavacano",
+ "cbl": "Bualkhaw Chin",
+ "cbn": "Nyahkur",
+ "cbo": "Izora",
+ "cbq": "Tsucuba; Cuba",
+ "cbr": "Cashibo-Cacataibo",
+ "cbs": "Cashinahua",
+ "cbt": "Chayahuita",
+ "cbu": "Candoshi-Shapra",
+ "cbv": "Cacua",
+ "cbw": "Kinabalian",
+ "cby": "Carabayo",
+ "ccc": "Chamicuro",
+ "ccd": "Cafundo Creole",
+ "cce": "Chopi",
+ "ccg": "Samba Daka",
+ "cch": "Atsam",
+ "ccj": "Kasanga",
+ "ccl": "Cutchi-Swahili",
+ "ccm": "Malaccan Creole Malay",
+ "ccn": "North Caucasian languages",
+ "cco": "Comaltepec Chinantec",
+ "ccp": "Chakma",
+ "ccr": "Cacaopera",
+ "ccs": "South Caucasian languages",
+ "cda": "Choni",
+ "cdc": "Chadic languages",
+ "cdd": "Caddoan languages",
+ "cde": "Chenchu",
+ "cdf": "Chiru",
+ "cdh": "Chambeali",
+ "cdi": "Chodri",
+ "cdj": "Churahi",
+ "cdm": "Chepang",
+ "cdn": "Chaudangsi",
+ "cdo": "Min Dong Chinese",
+ "cdr": "Cinda-Regi-Tiyal",
+ "cds": "Chadian Sign Language",
+ "cdy": "Chadong",
+ "cdz": "Koda",
+ "ce": "Chechen",
+ "cea": "Lower Chehalis",
+ "ceb": "Cebuano",
+ "ceg": "Chamacoco",
+ "cek": "Eastern Khumi Chin",
+ "cel": "Celtic languages",
+ "cen": "Cen",
+ "cet": "Centúúm",
+ "cey": "Ekai Chin",
+ "cfa": "Dijim-Bwilim",
+ "cfd": "Cara",
+ "cfg": "Como Karim",
+ "cfm": "Falam Chin",
+ "cga": "Changriwa",
+ "cgc": "Kagayanen",
+ "cgg": "Chiga",
+ "cgk": "Chocangacakha",
+ "ch": "Chamorro",
+ "chb": "Chibcha",
+ "chc": "Catawba",
+ "chd": "Highland Oaxaca Chontal",
+ "chf": "Tabasco Chontal",
+ "chg": "Chagatai",
+ "chh": "Chinook",
+ "chj": "Ojitlán Chinantec",
+ "chk": "Chuukese",
+ "chl": "Cahuilla",
+ "chm": "Mari (Russia)",
+ "chn": "Chinook jargon",
+ "cho": "Choctaw",
+ "chp": "Chipewyan; Dene Suline",
+ "chq": "Quiotepec Chinantec",
+ "chr": "Cherokee",
+ "cht": "Cholón",
+ "chw": "Chuwabu",
+ "chx": "Chantyal",
+ "chy": "Cheyenne",
+ "chz": "Ozumacín Chinantec",
+ "cia": "Cia-Cia",
+ "cib": "Ci Gbe",
+ "cic": "Chickasaw",
+ "cid": "Chimariko",
+ "cie": "Cineni",
+ "cih": "Chinali",
+ "cik": "Chitkuli Kinnauri",
+ "cim": "Cimbrian",
+ "cin": "Cinta Larga",
+ "cip": "Chiapanec",
+ "cir": "Tiri; Haméa; Méa",
+ "ciw": "Chippewa",
+ "ciy": "Chaima",
+ "cja": "Western Cham",
+ "cje": "Chru",
+ "cjh": "Upper Chehalis",
+ "cji": "Chamalal",
+ "cjk": "Chokwe",
+ "cjm": "Eastern Cham",
+ "cjn": "Chenapian",
+ "cjo": "Ashéninka Pajonal",
+ "cjp": "Cabécar",
+ "cjs": "Shor",
+ "cjv": "Chuave",
+ "cjy": "Jinyu Chinese",
+ "ckb": "Central Kurdish",
+ "ckh": "Chak",
+ "ckl": "Cibak",
+ "ckm": "Chakavian",
+ "ckn": "Kaang Chin",
+ "cko": "Anufo",
+ "ckq": "Kajakse",
+ "ckr": "Kairak",
+ "cks": "Tayo",
+ "ckt": "Chukot",
+ "cku": "Koasati",
+ "ckv": "Kavalan",
+ "ckx": "Caka",
+ "cky": "Cakfem-Mushere",
+ "ckz": "Cakchiquel-Quiché Mixed Language",
+ "cla": "Ron",
+ "clc": "Chilcotin",
+ "cld": "Chaldean Neo-Aramaic",
+ "cle": "Lealao Chinantec",
+ "clh": "Chilisso",
+ "cli": "Chakali",
+ "clj": "Laitu Chin",
+ "clk": "Idu-Mishmi",
+ "cll": "Chala",
+ "clm": "Clallam",
+ "clo": "Lowland Oaxaca Chontal",
+ "clt": "Lautu Chin",
+ "clu": "Caluyanun",
+ "clw": "Chulym",
+ "cly": "Eastern Highland Chatino",
+ "cma": "Maa",
+ "cmc": "Chamic languages",
+ "cme": "Cerma",
+ "cmg": "Classical Mongolian",
+ "cmi": "Emberá-Chamí",
+ "cml": "Campalagian",
+ "cmm": "Michigamea",
+ "cmn": "Mandarin Chinese",
+ "cmo": "Central Mnong",
+ "cmr": "Mro-Khimi Chin",
+ "cms": "Messapic",
+ "cmt": "Camtho",
+ "cna": "Changthang",
+ "cnb": "Chinbon Chin",
+ "cnc": "Côông",
+ "cng": "Northern Qiang",
+ "cnh": "Hakha Chin; Haka Chin",
+ "cni": "Asháninka",
+ "cnk": "Khumi Chin",
+ "cnl": "Lalana Chinantec",
+ "cno": "Con",
+ "cnp": "Northern Ping Chinese; Northern Pinghua",
+ "cnq": "Chung",
+ "cnr": "Montenegrin",
+ "cns": "Central Asmat",
+ "cnt": "Tepetotutla Chinantec",
+ "cnu": "Chenoua",
+ "cnw": "Ngawn Chin",
+ "cnx": "Middle Cornish",
+ "co": "Corsican",
+ "coa": "Cocos Islands Malay",
+ "cob": "Chicomuceltec",
+ "coc": "Cocopa",
+ "cod": "Cocama-Cocamilla",
+ "coe": "Koreguaje",
+ "cof": "Colorado",
+ "cog": "Chong",
+ "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma",
+ "coj": "Cochimi",
+ "cok": "Santa Teresa Cora",
+ "col": "Columbia-Wenatchi",
+ "com": "Comanche",
+ "con": "Cofán",
+ "coo": "Comox",
+ "cop": "Coptic",
+ "coq": "Coquille",
+ "cot": "Caquinte",
+ "cou": "Wamey",
+ "cov": "Cao Miao",
+ "cow": "Cowlitz",
+ "cox": "Nanti",
+ "coz": "Chochotec",
+ "cpa": "Palantla Chinantec",
+ "cpb": "Ucayali-Yurúa Ashéninka",
+ "cpc": "Ajyíninka Apurucayali",
+ "cpe": "English-based creoles and pidgins",
+ "cpf": "French-based creoles and pidgins",
+ "cpg": "Cappadocian Greek",
+ "cpi": "Chinese Pidgin English",
+ "cpn": "Cherepon",
+ "cpo": "Kpeego",
+ "cpp": "Portuguese-based creoles and pidgins",
+ "cps": "Capiznon",
+ "cpu": "Pichis Ashéninka",
+ "cpx": "Pu-Xian Chinese",
+ "cpy": "South Ucayali Ashéninka",
+ "cqd": "Chuanqiandian Cluster Miao",
+ "cr": "Cree",
+ "cra": "Chara",
+ "crb": "Island Carib",
+ "crc": "Lonwolwol",
+ "crd": "Coeur d'Alene",
+ "crf": "Caramanta",
+ "crg": "Michif",
+ "crh": "Crimean Tatar; Crimean Turkish",
+ "cri": "Sãotomense",
+ "crj": "Southern East Cree",
+ "crk": "Plains Cree",
+ "crl": "Northern East Cree",
+ "crm": "Moose Cree",
+ "crn": "El Nayar Cora",
+ "cro": "Crow",
+ "crp": "Creoles and pidgins",
+ "crq": "Iyo'wujwa Chorote",
+ "crr": "Carolina Algonquian",
+ "crs": "Seselwa Creole French",
+ "crt": "Iyojwa'ja Chorote",
+ "crv": "Chaura",
+ "crw": "Chrau",
+ "crx": "Carrier",
+ "cry": "Cori",
+ "crz": "Cruzeño",
+ "cs": "Czech",
+ "csa": "Chiltepec Chinantec",
+ "csb": "Kashubian",
+ "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana",
+ "csd": "Chiangmai Sign Language",
+ "cse": "Czech Sign Language",
+ "csf": "Cuba Sign Language",
+ "csg": "Chilean Sign Language",
+ "csh": "Asho Chin",
+ "csi": "Coast Miwok",
+ "csj": "Songlai Chin",
+ "csk": "Jola-Kasa",
+ "csl": "Chinese Sign Language",
+ "csm": "Central Sierra Miwok",
+ "csn": "Colombian Sign Language",
+ "cso": "Sochiapam Chinantec; Sochiapan Chinantec",
+ "csp": "Southern Ping Chinese; Southern Pinghua",
+ "csq": "Croatia Sign Language",
+ "csr": "Costa Rican Sign Language",
+ "css": "Southern Ohlone",
+ "cst": "Northern Ohlone",
+ "csu": "Central Sudanic languages",
+ "csv": "Sumtu Chin",
+ "csw": "Swampy Cree",
+ "csx": "Cambodian Sign Language",
+ "csy": "Siyin Chin",
+ "csz": "Coos",
+ "cta": "Tataltepec Chatino",
+ "ctc": "Chetco",
+ "ctd": "Tedim Chin",
+ "cte": "Tepinapa Chinantec",
+ "ctg": "Chittagonian",
+ "cth": "Thaiphum Chin",
+ "ctl": "Tlacoatzintepec Chinantec",
+ "ctm": "Chitimacha",
+ "ctn": "Chhintange",
+ "cto": "Emberá-Catío",
+ "ctp": "Western Highland Chatino",
+ "cts": "Northern Catanduanes Bikol",
+ "ctt": "Wayanad Chetti",
+ "ctu": "Chol",
+ "cty": "Moundadan Chetty",
+ "ctz": "Zacatepec Chatino",
+ "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic",
+ "cua": "Cua",
+ "cub": "Cubeo",
+ "cuc": "Usila Chinantec",
+ "cuh": "Chuka; Gichuka",
+ "cui": "Cuiba",
+ "cuj": "Mashco Piro",
+ "cuk": "San Blas Kuna",
+ "cul": "Culina; Kulina",
+ "cuo": "Cumanagoto",
+ "cup": "Cupeño",
+ "cuq": "Cun",
+ "cur": "Chhulung",
+ "cus": "Cushitic languages",
+ "cut": "Teutila Cuicatec",
+ "cuu": "Tai Ya",
+ "cuv": "Cuvok",
+ "cuw": "Chukwa",
+ "cux": "Tepeuxila Cuicatec",
+ "cuy": "Cuitlatec",
+ "cv": "Chuvash",
+ "cvg": "Chug",
+ "cvn": "Valle Nacional Chinantec",
+ "cwa": "Kabwa",
+ "cwb": "Maindo",
+ "cwd": "Woods Cree",
+ "cwe": "Kwere",
+ "cwg": "Chewong; Cheq Wong",
+ "cwt": "Kuwaataay",
+ "cy": "Welsh",
+ "cya": "Nopala Chatino",
+ "cyb": "Cayubaba",
+ "cyo": "Cuyonon",
+ "czh": "Huizhou Chinese",
+ "czk": "Knaanic",
+ "czn": "Zenzontepec Chatino",
+ "czo": "Min Zhong Chinese",
+ "czt": "Zotung Chin",
+ "da": "Danish",
+ "daa": "Dangaléat",
+ "dac": "Dambi",
+ "dad": "Marik",
+ "dae": "Duupa",
+ "dag": "Dagbani",
+ "dah": "Gwahatike",
+ "dai": "Day",
+ "daj": "Dar Fur Daju",
+ "dak": "Dakota",
+ "dal": "Dahalo",
+ "dam": "Damakawa",
+ "dao": "Daai Chin",
+ "daq": "Dandami Maria",
+ "dar": "Dargwa",
+ "das": "Daho-Doo",
+ "dau": "Dar Sila Daju",
+ "dav": "Taita; Dawida",
+ "daw": "Davawenyo",
+ "dax": "Dayi",
+ "day": "Land Dayak languages",
+ "daz": "Dao",
+ "dba": "Bangime",
+ "dbb": "Deno",
+ "dbd": "Dadiya",
+ "dbe": "Dabe",
+ "dbf": "Edopi",
+ "dbg": "Dogul Dom Dogon",
+ "dbi": "Doka",
+ "dbj": "Ida'an",
+ "dbl": "Dyirbal",
+ "dbm": "Duguri",
+ "dbn": "Duriankere",
+ "dbo": "Dulbu",
+ "dbp": "Duwai",
+ "dbq": "Daba",
+ "dbr": "Dabarre",
+ "dbt": "Ben Tey Dogon",
+ "dbu": "Bondum Dom Dogon",
+ "dbv": "Dungu",
+ "dbw": "Bankan Tey Dogon",
+ "dby": "Dibiyaso",
+ "dcc": "Deccan",
+ "dcr": "Negerhollands",
+ "dda": "Dadi Dadi",
+ "ddd": "Dongotono",
+ "dde": "Doondo",
+ "ddg": "Fataluku",
+ "ddi": "West Goodenough",
+ "ddj": "Jaru",
+ "ddn": "Dendi (Benin)",
+ "ddo": "Dido",
+ "ddr": "Dhudhuroa",
+ "dds": "Donno So Dogon",
+ "ddw": "Dawera-Daweloor",
+ "de": "German",
+ "dec": "Dagik",
+ "ded": "Dedua",
+ "dee": "Dewoin",
+ "def": "Dezfuli",
+ "deg": "Degema",
+ "deh": "Dehwari",
+ "dei": "Demisa",
+ "dek": "Dek",
+ "del": "Delaware",
+ "dem": "Dem",
+ "den": "Slave (Athapascan)",
+ "dep": "Pidgin Delaware",
+ "deq": "Dendi (Central African Republic)",
+ "der": "Deori",
+ "des": "Desano",
+ "dev": "Domung",
+ "dez": "Dengese",
+ "dga": "Southern Dagaare",
+ "dgb": "Bunoge Dogon",
+ "dgc": "Casiguran Dumagat Agta",
+ "dgd": "Dagaari Dioula",
+ "dge": "Degenan",
+ "dgg": "Doga",
+ "dgh": "Dghwede",
+ "dgi": "Northern Dagara",
+ "dgk": "Dagba",
+ "dgl": "Andaandi; Dongolawi",
+ "dgn": "Dagoman",
+ "dgo": "Dogri (individual language)",
+ "dgr": "Dogrib; Tłı̨chǫ",
+ "dgs": "Dogoso",
+ "dgt": "Ndra'ngith",
+ "dgw": "Daungwurrung",
+ "dgx": "Doghoro",
+ "dgz": "Daga",
+ "dhd": "Dhundari",
+ "dhg": "Dhangu-Djangu; Dhangu; Djangu",
+ "dhi": "Dhimal",
+ "dhl": "Dhalandji",
+ "dhm": "Zemba",
+ "dhn": "Dhanki",
+ "dho": "Dhodia",
+ "dhr": "Dhargari",
+ "dhs": "Dhaiso",
+ "dhu": "Dhurga",
+ "dhv": "Dehu; Drehu",
+ "dhw": "Dhanwar (Nepal)",
+ "dhx": "Dhungaloo",
+ "dia": "Dia",
+ "dib": "South Central Dinka",
+ "dic": "Lakota Dida",
+ "did": "Didinga",
+ "dif": "Dieri; Diyari",
+ "dig": "Digo; Chidigo",
+ "dih": "Kumiai",
+ "dii": "Dimbong",
+ "dij": "Dai",
+ "dik": "Southwestern Dinka",
+ "dil": "Dilling",
+ "dim": "Dime",
+ "din": "Dinka",
+ "dio": "Dibo",
+ "dip": "Northeastern Dinka",
+ "diq": "Dimli (individual language)",
+ "dir": "Dirim",
+ "dis": "Dimasa",
+ "diu": "Diriku",
+ "diw": "Northwestern Dinka",
+ "dix": "Dixon Reef",
+ "diy": "Diuwe",
+ "diz": "Ding",
+ "dja": "Djadjawurrung",
+ "djb": "Djinba",
+ "djc": "Dar Daju Daju",
+ "djd": "Djamindjung; Ngaliwurru",
+ "dje": "Zarma",
+ "djf": "Djangun",
+ "dji": "Djinang",
+ "djj": "Djeebbana",
+ "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge",
+ "djm": "Jamsay Dogon",
+ "djn": "Jawoyn; Djauan",
+ "djo": "Jangkang",
+ "djr": "Djambarrpuyngu",
+ "dju": "Kapriman",
+ "djw": "Djawi",
+ "dka": "Dakpakha",
+ "dkg": "Kadung",
+ "dkk": "Dakka",
+ "dkr": "Kuijau",
+ "dks": "Southeastern Dinka",
+ "dkx": "Mazagway",
+ "dlg": "Dolgan",
+ "dlk": "Dahalik",
+ "dlm": "Dalmatian",
+ "dln": "Darlong",
+ "dma": "Duma",
+ "dmb": "Mombo Dogon",
+ "dmc": "Gavak",
+ "dmd": "Madhi Madhi",
+ "dme": "Dugwor",
+ "dmf": "Medefaidrin",
+ "dmg": "Upper Kinabatangan",
+ "dmk": "Domaaki",
+ "dml": "Dameli",
+ "dmm": "Dama",
+ "dmn": "Mande languages",
+ "dmo": "Kemedzung",
+ "dmr": "East Damar",
+ "dms": "Dampelas",
+ "dmu": "Dubu; Tebi",
+ "dmv": "Dumpas",
+ "dmw": "Mudburra",
+ "dmx": "Dema",
+ "dmy": "Demta; Sowari",
+ "dna": "Upper Grand Valley Dani",
+ "dnd": "Daonda",
+ "dne": "Ndendeule",
+ "dng": "Dungan",
+ "dni": "Lower Grand Valley Dani",
+ "dnj": "Dan",
+ "dnk": "Dengka",
+ "dnn": "Dzùùngoo",
+ "dno": "Ndrulo; Northern Lendu",
+ "dnr": "Danaru",
+ "dnt": "Mid Grand Valley Dani",
+ "dnu": "Danau",
+ "dnv": "Danu",
+ "dnw": "Western Dani",
+ "dny": "Dení",
+ "doa": "Dom",
+ "dob": "Dobu",
+ "doc": "Northern Dong",
+ "doe": "Doe",
+ "dof": "Domu",
+ "doh": "Dong",
+ "doi": "Dogri (macrolanguage)",
+ "dok": "Dondo",
+ "dol": "Doso",
+ "don": "Toura (Papua New Guinea)",
+ "doo": "Dongo",
+ "dop": "Lukpa",
+ "doq": "Dominican Sign Language",
+ "dor": "Dori'o",
+ "dos": "Dogosé",
+ "dot": "Dass",
+ "dov": "Dombe",
+ "dow": "Doyayo",
+ "dox": "Bussa",
+ "doy": "Dompo",
+ "doz": "Dorze",
+ "dpp": "Papar",
+ "dra": "Dravidian languages",
+ "drb": "Dair",
+ "drc": "Minderico",
+ "drd": "Darmiya",
+ "dre": "Dolpo",
+ "drg": "Rungus",
+ "dri": "C'Lela",
+ "drl": "Paakantyi",
+ "drn": "West Damar",
+ "dro": "Daro-Matu Melanau",
+ "drq": "Dura",
+ "drs": "Gedeo",
+ "drt": "Drents",
+ "dru": "Rukai",
+ "dry": "Darai",
+ "dsb": "Lower Sorbian",
+ "dse": "Dutch Sign Language",
+ "dsh": "Daasanach",
+ "dsi": "Disa",
+ "dsl": "Danish Sign Language",
+ "dsn": "Dusner",
+ "dso": "Desiya",
+ "dsq": "Tadaksahak",
+ "dsz": "Mardin Sign Language",
+ "dta": "Daur",
+ "dtb": "Labuk-Kinabatangan Kadazan",
+ "dtd": "Ditidaht",
+ "dth": "Adithinngithigh",
+ "dti": "Ana Tinga Dogon",
+ "dtk": "Tene Kan Dogon",
+ "dtm": "Tomo Kan Dogon",
+ "dtn": "Daatsʼíin",
+ "dto": "Tommo So Dogon",
+ "dtp": "Kadazan Dusun; Central Dusun",
+ "dtr": "Lotud",
+ "dts": "Toro So Dogon",
+ "dtt": "Toro Tegu Dogon",
+ "dtu": "Tebul Ure Dogon",
+ "dty": "Dotyali",
+ "dua": "Duala",
+ "dub": "Dubli",
+ "duc": "Duna",
+ "due": "Umiray Dumaget Agta",
+ "duf": "Dumbea; Drubea",
+ "dug": "Duruma; Chiduruma",
+ "duh": "Dungra Bhil",
+ "dui": "Dumun",
+ "duk": "Uyajitaya",
+ "dul": "Alabat Island Agta",
+ "dum": "Middle Dutch (ca. 1050-1350)",
+ "dun": "Dusun Deyah",
+ "duo": "Dupaninan Agta",
+ "dup": "Duano",
+ "duq": "Dusun Malang",
+ "dur": "Dii",
+ "dus": "Dumi",
+ "duu": "Drung",
+ "duv": "Duvle",
+ "duw": "Dusun Witu",
+ "dux": "Duungooma",
+ "duy": "Dicamay Agta",
+ "duz": "Duli-Gey",
+ "dv": "Dhivehi; Divehi; Maldivian",
+ "dva": "Duau",
+ "dwa": "Diri",
+ "dwk": "Dawik Kui",
+ "dwr": "Dawro",
+ "dws": "Dutton World Speedwords",
+ "dwu": "Dhuwal",
+ "dww": "Dawawa",
+ "dwy": "Dhuwaya",
+ "dwz": "Dewas Rai",
+ "dya": "Dyan",
+ "dyb": "Dyaberdyaber",
+ "dyd": "Dyugun",
+ "dyg": "Villa Viciosa Agta",
+ "dyi": "Djimini Senoufo",
+ "dym": "Yanda Dom Dogon",
+ "dyn": "Dyangadi; Dhanggatti",
+ "dyo": "Jola-Fonyi",
+ "dyu": "Dyula",
+ "dyy": "Djabugay; Dyaabugay",
+ "dz": "Dzongkha",
+ "dza": "Tunzu",
+ "dze": "Djiwarli",
+ "dzg": "Dazaga",
+ "dzl": "Dzalakha",
+ "dzn": "Dzando",
+ "eaa": "Karenggapa",
+ "ebc": "Beginci",
+ "ebg": "Ebughu",
+ "ebk": "Eastern Bontok",
+ "ebo": "Teke-Ebo",
+ "ebr": "Ebrié",
+ "ebu": "Embu; Kiembu",
+ "ecr": "Eteocretan",
+ "ecs": "Ecuadorian Sign Language",
+ "ecy": "Eteocypriot",
+ "ee": "Ewe",
+ "eee": "E",
+ "efa": "Efai",
+ "efe": "Efe",
+ "efi": "Efik",
+ "ega": "Ega",
+ "egl": "Emilian",
+ "egm": "Benamanga",
+ "ego": "Eggon",
+ "egx": "Egyptian languages",
+ "egy": "Egyptian (Ancient)",
+ "ehs": "Miyakubo Sign Language",
+ "ehu": "Ehueun",
+ "eip": "Eipomek",
+ "eit": "Eitiep",
+ "eiv": "Askopan",
+ "eja": "Ejamat",
+ "eka": "Ekajuk",
+ "eke": "Ekit",
+ "ekg": "Ekari",
+ "eki": "Eki",
+ "ekk": "Standard Estonian",
+ "ekl": "Kol (Bangladesh); Kol",
+ "ekm": "Elip",
+ "eko": "Koti",
+ "ekp": "Ekpeye",
+ "ekr": "Yace",
+ "eky": "Eastern Kayah",
+ "el": "Modern Greek (1453-)",
+ "ele": "Elepi",
+ "elh": "El Hugeirat",
+ "eli": "Nding",
+ "elk": "Elkei",
+ "elm": "Eleme",
+ "elo": "El Molo",
+ "elu": "Elu",
+ "elx": "Elamite",
+ "ema": "Emai-Iuleha-Ora",
+ "emb": "Embaloh",
+ "eme": "Emerillon",
+ "emg": "Eastern Meohang",
+ "emi": "Mussau-Emira",
+ "emk": "Eastern Maninkakan",
+ "emm": "Mamulique",
+ "emn": "Eman",
+ "emp": "Northern Emberá",
+ "emq": "Eastern Minyag",
+ "ems": "Pacific Gulf Yupik",
+ "emu": "Eastern Muria",
+ "emw": "Emplawas",
+ "emx": "Erromintxela",
+ "emy": "Epigraphic Mayan",
+ "emz": "Mbessa",
+ "en": "English",
+ "ena": "Apali",
+ "enb": "Markweeta",
+ "enc": "En",
+ "end": "Ende",
+ "enf": "Forest Enets",
+ "enh": "Tundra Enets",
+ "enl": "Enlhet",
+ "enm": "Middle English (1100-1500)",
+ "enn": "Engenni",
+ "eno": "Enggano",
+ "enq": "Enga",
+ "enr": "Emumu; Emem",
+ "enu": "Enu",
+ "env": "Enwan (Edo State)",
+ "enw": "Enwan (Akwa Ibom State)",
+ "enx": "Enxet",
+ "eo": "Esperanto",
+ "eot": "Beti (Côte d'Ivoire)",
+ "epi": "Epie",
+ "era": "Eravallan",
+ "erg": "Sie",
+ "erh": "Eruwa",
+ "eri": "Ogea",
+ "erk": "South Efate",
+ "ero": "Horpa",
+ "err": "Erre",
+ "ers": "Ersu",
+ "ert": "Eritai",
+ "erw": "Erokwanas",
+ "es": "Spanish; Castilian",
+ "ese": "Ese Ejja",
+ "esg": "Aheri Gondi",
+ "esh": "Eshtehardi",
+ "esi": "North Alaskan Inupiatun",
+ "esk": "Northwest Alaska Inupiatun",
+ "esl": "Egypt Sign Language",
+ "esm": "Esuma",
+ "esn": "Salvadoran Sign Language",
+ "eso": "Estonian Sign Language",
+ "esq": "Esselen",
+ "ess": "Central Siberian Yupik",
+ "esu": "Central Yupik",
+ "esx": "Eskimo-Aleut languages",
+ "esy": "Eskayan",
+ "et": "Estonian",
+ "etb": "Etebi",
+ "etc": "Etchemin",
+ "eth": "Ethiopian Sign Language",
+ "etn": "Eton (Vanuatu)",
+ "eto": "Eton (Cameroon)",
+ "etr": "Edolo",
+ "ets": "Yekhee",
+ "ett": "Etruscan",
+ "etu": "Ejagham",
+ "etx": "Eten",
+ "etz": "Semimi",
+ "eu": "Basque",
+ "euq": "Basque (family)",
+ "eve": "Even",
+ "evh": "Uvbie",
+ "evn": "Evenki",
+ "ewo": "Ewondo",
+ "ext": "Extremaduran",
+ "eya": "Eyak",
+ "eyo": "Keiyo",
+ "eza": "Ezaa",
+ "eze": "Uzekwe",
+ "fa": "Persian",
+ "faa": "Fasu",
+ "fab": "Fa d'Ambu",
+ "fad": "Wagi",
+ "faf": "Fagani",
+ "fag": "Finongan",
+ "fah": "Baissa Fali",
+ "fai": "Faiwol",
+ "faj": "Faita",
+ "fak": "Fang (Cameroon)",
+ "fal": "South Fali",
+ "fam": "Fam",
+ "fan": "Fang (Equatorial Guinea)",
+ "fap": "Paloor",
+ "far": "Fataleka",
+ "fat": "Fanti",
+ "fau": "Fayu",
+ "fax": "Fala",
+ "fay": "Southwestern Fars",
+ "faz": "Northwestern Fars",
+ "fbl": "West Albay Bikol",
+ "fcs": "Quebec Sign Language",
+ "fer": "Feroge",
+ "ff": "Fulah",
+ "ffi": "Foia Foia",
+ "ffm": "Maasina Fulfulde",
+ "fgr": "Fongoro",
+ "fi": "Finnish",
+ "fia": "Nobiin",
+ "fie": "Fyer",
+ "fif": "Faifi",
+ "fil": "Filipino; Pilipino",
+ "fip": "Fipa",
+ "fir": "Firan",
+ "fit": "Tornedalen Finnish; Meänkieli",
+ "fiu": "Finno-Ugrian languages",
+ "fiw": "Fiwaga",
+ "fj": "Fijian",
+ "fkk": "Kirya-Konzəl",
+ "fkv": "Kven Finnish",
+ "fla": "Kalispel-Pend d'Oreille",
+ "flh": "Foau",
+ "fli": "Fali",
+ "fll": "North Fali",
+ "fln": "Flinders Island",
+ "flr": "Fuliiru",
+ "fly": "Flaaitaal; Tsotsitaal",
+ "fmp": "Fe'fe'",
+ "fmu": "Far Western Muria",
+ "fnb": "Fanbak",
+ "fng": "Fanagalo",
+ "fni": "Fania",
+ "fo": "Faroese",
+ "fod": "Foodo",
+ "foi": "Foi",
+ "fom": "Foma",
+ "fon": "Fon",
+ "for": "Fore",
+ "fos": "Siraya",
+ "fox": "Formosan languages",
+ "fpe": "Fernando Po Creole English",
+ "fqs": "Fas",
+ "fr": "French",
+ "frc": "Cajun French",
+ "frd": "Fordata",
+ "frk": "Frankish",
+ "frm": "Middle French (ca. 1400-1600)",
+ "fro": "Old French (842-ca. 1400)",
+ "frp": "Arpitan; Francoprovençal",
+ "frq": "Forak",
+ "frr": "Northern Frisian",
+ "frs": "Eastern Frisian",
+ "frt": "Fortsenal",
+ "fse": "Finnish Sign Language",
+ "fsl": "French Sign Language",
+ "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli",
+ "fub": "Adamawa Fulfulde",
+ "fuc": "Pulaar",
+ "fud": "East Futuna",
+ "fue": "Borgu Fulfulde",
+ "fuf": "Pular",
+ "fuh": "Western Niger Fulfulde",
+ "fui": "Bagirmi Fulfulde",
+ "fuj": "Ko",
+ "fum": "Fum",
+ "fun": "Fulniô",
+ "fuq": "Central-Eastern Niger Fulfulde",
+ "fur": "Friulian",
+ "fut": "Futuna-Aniwa",
+ "fuu": "Furu",
+ "fuv": "Nigerian Fulfulde",
+ "fuy": "Fuyug",
+ "fvr": "Fur",
+ "fwa": "Fwâi",
+ "fwe": "Fwe",
+ "fy": "Western Frisian",
+ "ga": "Irish",
+ "gaa": "Ga",
+ "gab": "Gabri",
+ "gac": "Mixed Great Andamanese",
+ "gad": "Gaddang",
+ "gae": "Guarequena",
+ "gaf": "Gende",
+ "gag": "Gagauz",
+ "gah": "Alekano",
+ "gai": "Borei",
+ "gaj": "Gadsup",
+ "gak": "Gamkonora",
+ "gal": "Galolen",
+ "gam": "Kandawo",
+ "gan": "Gan Chinese",
+ "gao": "Gants",
+ "gap": "Gal",
+ "gaq": "Gata'",
+ "gar": "Galeya",
+ "gas": "Adiwasi Garasia",
+ "gat": "Kenati",
+ "gau": "Mudhili Gadaba",
+ "gaw": "Nobonob",
+ "gax": "Borana-Arsi-Guji Oromo",
+ "gay": "Gayo",
+ "gaz": "West Central Oromo",
+ "gba": "Gbaya (Central African Republic)",
+ "gbb": "Kaytetye",
+ "gbd": "Karajarri",
+ "gbe": "Niksek",
+ "gbf": "Gaikundi",
+ "gbg": "Gbanziri",
+ "gbh": "Defi Gbe",
+ "gbi": "Galela",
+ "gbj": "Bodo Gadaba",
+ "gbk": "Gaddi",
+ "gbl": "Gamit",
+ "gbm": "Garhwali",
+ "gbn": "Mo'da",
+ "gbo": "Northern Grebo",
+ "gbp": "Gbaya-Bossangoa",
+ "gbq": "Gbaya-Bozoum",
+ "gbr": "Gbagyi",
+ "gbs": "Gbesi Gbe",
+ "gbu": "Gagadu",
+ "gbv": "Gbanu",
+ "gbw": "Gabi-Gabi",
+ "gbx": "Eastern Xwla Gbe",
+ "gby": "Gbari",
+ "gbz": "Zoroastrian Dari",
+ "gcc": "Mali",
+ "gcd": "Ganggalida",
+ "gce": "Galice",
+ "gcf": "Guadeloupean Creole French",
+ "gcl": "Grenadian Creole English",
+ "gcn": "Gaina",
+ "gcr": "Guianese Creole French",
+ "gct": "Colonia Tovar German",
+ "gd": "Scottish Gaelic; Gaelic",
+ "gda": "Gade Lohar",
+ "gdb": "Pottangi Ollar Gadaba",
+ "gdc": "Gugu Badhun",
+ "gdd": "Gedaged",
+ "gde": "Gude",
+ "gdf": "Guduf-Gava",
+ "gdg": "Ga'dang",
+ "gdh": "Gadjerawang; Gajirrabeng",
+ "gdi": "Gundi",
+ "gdj": "Gurdjar",
+ "gdk": "Gadang",
+ "gdl": "Dirasha",
+ "gdm": "Laal",
+ "gdn": "Umanakaina",
+ "gdo": "Ghodoberi",
+ "gdq": "Mehri",
+ "gdr": "Wipi",
+ "gds": "Ghandruk Sign Language",
+ "gdt": "Kungardutyi",
+ "gdu": "Gudu",
+ "gdx": "Godwari",
+ "gea": "Geruma",
+ "geb": "Kire",
+ "gec": "Gboloo Grebo",
+ "ged": "Gade",
+ "gef": "Gerai",
+ "geg": "Gengle",
+ "geh": "Hutterite German; Hutterisch",
+ "gei": "Gebe",
+ "gej": "Gen",
+ "gek": "Ywom",
+ "gel": "ut-Ma'in",
+ "gem": "Germanic languages",
+ "geq": "Geme",
+ "ges": "Geser-Gorom",
+ "gev": "Eviya",
+ "gew": "Gera",
+ "gex": "Garre",
+ "gey": "Enya",
+ "gez": "Geez",
+ "gfk": "Patpatar",
+ "gft": "Gafat",
+ "gga": "Gao",
+ "ggb": "Gbii",
+ "ggd": "Gugadj",
+ "gge": "Gurr-goni",
+ "ggg": "Gurgula",
+ "ggk": "Kungarakany",
+ "ggl": "Ganglau",
+ "ggt": "Gitua",
+ "ggu": "Gagu; Gban",
+ "ggw": "Gogodala",
+ "gha": "Ghadamès",
+ "ghc": "Hiberno-Scottish Gaelic",
+ "ghe": "Southern Ghale",
+ "ghh": "Northern Ghale",
+ "ghk": "Geko Karen",
+ "ghl": "Ghulfan",
+ "ghn": "Ghanongga",
+ "gho": "Ghomara",
+ "ghr": "Ghera",
+ "ghs": "Guhu-Samane",
+ "ght": "Kuke; Kutang Ghale",
+ "gia": "Kija",
+ "gib": "Gibanawa",
+ "gic": "Gail",
+ "gid": "Gidar",
+ "gie": "Gaɓogbo; Guébie",
+ "gig": "Goaria",
+ "gih": "Githabul",
+ "gii": "Girirra",
+ "gil": "Gilbertese",
+ "gim": "Gimi (Eastern Highlands)",
+ "gin": "Hinukh",
+ "gip": "Gimi (West New Britain)",
+ "giq": "Green Gelao",
+ "gir": "Red Gelao",
+ "gis": "North Giziga",
+ "git": "Gitxsan",
+ "giu": "Mulao",
+ "giw": "White Gelao",
+ "gix": "Gilima",
+ "giy": "Giyug",
+ "giz": "South Giziga",
+ "gjk": "Kachi Koli",
+ "gjm": "Gunditjmara",
+ "gjn": "Gonja",
+ "gjr": "Gurindji Kriol",
+ "gju": "Gujari",
+ "gka": "Guya",
+ "gkd": "Magɨ (Madang Province)",
+ "gke": "Ndai",
+ "gkn": "Gokana",
+ "gko": "Kok-Nar",
+ "gkp": "Guinea Kpelle",
+ "gku": "ǂUngkue",
+ "gl": "Galician",
+ "glb": "Belning",
+ "glc": "Bon Gula",
+ "gld": "Nanai",
+ "glh": "Northwest Pashai; Northwest Pashayi",
+ "glj": "Gula Iro",
+ "glk": "Gilaki",
+ "gll": "Garlali",
+ "glo": "Galambu",
+ "glr": "Glaro-Twabo",
+ "glu": "Gula (Chad)",
+ "glw": "Glavda",
+ "gly": "Gule",
+ "gma": "Gambera",
+ "gmb": "Gula'alaa",
+ "gmd": "Mághdì",
+ "gme": "East Germanic languages",
+ "gmg": "Magɨyi",
+ "gmh": "Middle High German (ca. 1050-1500)",
+ "gml": "Middle Low German",
+ "gmm": "Gbaya-Mbodomo",
+ "gmn": "Gimnime",
+ "gmq": "North Germanic languages",
+ "gmr": "Mirning; Mirniny",
+ "gmu": "Gumalu",
+ "gmv": "Gamo",
+ "gmw": "West Germanic languages",
+ "gmx": "Magoma",
+ "gmy": "Mycenaean Greek",
+ "gmz": "Mgbolizhia",
+ "gn": "Guarani",
+ "gna": "Kaansa",
+ "gnb": "Gangte",
+ "gnc": "Guanche",
+ "gnd": "Zulgo-Gemzek",
+ "gne": "Ganang",
+ "gng": "Ngangam",
+ "gnh": "Lere",
+ "gni": "Gooniyandi",
+ "gnj": "Ngen",
+ "gnk": "ǁGana",
+ "gnl": "Gangulu",
+ "gnm": "Ginuman",
+ "gnn": "Gumatj",
+ "gno": "Northern Gondi",
+ "gnq": "Gana",
+ "gnr": "Gureng Gureng",
+ "gnt": "Guntai",
+ "gnu": "Gnau",
+ "gnw": "Western Bolivian Guaraní",
+ "gnz": "Ganzi",
+ "goa": "Guro",
+ "gob": "Playero",
+ "goc": "Gorakor",
+ "god": "Godié",
+ "goe": "Gongduk",
+ "gof": "Gofa",
+ "gog": "Gogo",
+ "goh": "Old High German (ca. 750-1050)",
+ "goi": "Gobasi",
+ "goj": "Gowlan",
+ "gok": "Gowli",
+ "gol": "Gola",
+ "gom": "Goan Konkani",
+ "gon": "Gondi",
+ "goo": "Gone Dau",
+ "gop": "Yeretuar",
+ "goq": "Gorap",
+ "gor": "Gorontalo",
+ "gos": "Gronings",
+ "got": "Gothic",
+ "gou": "Gavar",
+ "gov": "Goo",
+ "gow": "Gorowa",
+ "gox": "Gobu",
+ "goy": "Goundo",
+ "goz": "Gozarkhani",
+ "gpa": "Gupa-Abawa",
+ "gpe": "Ghanaian Pidgin English",
+ "gpn": "Taiap",
+ "gqa": "Ga'anda",
+ "gqi": "Guiqiong",
+ "gqn": "Guana (Brazil)",
+ "gqr": "Gor",
+ "gqu": "Qau",
+ "gra": "Rajput Garasia",
+ "grb": "Grebo",
+ "grc": "Ancient Greek (to 1453)",
+ "grd": "Guruntum-Mbaaru",
+ "grg": "Madi",
+ "grh": "Gbiri-Niragu",
+ "gri": "Ghari",
+ "grj": "Southern Grebo",
+ "grk": "Greek languages",
+ "grm": "Kota Marudu Talantang",
+ "gro": "Groma",
+ "grq": "Gorovu",
+ "grr": "Taznatit",
+ "grs": "Gresi",
+ "grt": "Garo",
+ "gru": "Kistane",
+ "grv": "Central Grebo",
+ "grw": "Gweda",
+ "grx": "Guriaso",
+ "gry": "Barclayville Grebo",
+ "grz": "Guramalum",
+ "gse": "Ghanaian Sign Language",
+ "gsg": "German Sign Language",
+ "gsl": "Gusilay",
+ "gsm": "Guatemalan Sign Language",
+ "gsn": "Nema; Gusan",
+ "gso": "Southwest Gbaya",
+ "gsp": "Wasembo",
+ "gss": "Greek Sign Language",
+ "gsw": "Swiss German; Alemannic; Alsatian",
+ "gta": "Guató",
+ "gtu": "Aghu-Tharnggala",
+ "gu": "Gujarati",
+ "gua": "Shiki",
+ "gub": "Guajajára",
+ "guc": "Wayuu",
+ "gud": "Yocoboué Dida",
+ "gue": "Gurindji",
+ "guf": "Gupapuyngu",
+ "gug": "Paraguayan Guaraní",
+ "guh": "Guahibo",
+ "gui": "Eastern Bolivian Guaraní",
+ "guk": "Gumuz",
+ "gul": "Sea Island Creole English",
+ "gum": "Guambiano",
+ "gun": "Mbyá Guaraní",
+ "guo": "Guayabero",
+ "gup": "Gunwinggu",
+ "guq": "Aché",
+ "gur": "Farefare",
+ "gus": "Guinean Sign Language",
+ "gut": "Maléku Jaíka",
+ "guu": "Yanomamö",
+ "guw": "Gun",
+ "gux": "Gourmanchéma",
+ "guz": "Gusii; Ekegusii",
+ "gv": "Manx",
+ "gva": "Guana (Paraguay)",
+ "gvc": "Guanano",
+ "gve": "Duwet",
+ "gvf": "Golin",
+ "gvj": "Guajá",
+ "gvl": "Gulay",
+ "gvm": "Gurmana",
+ "gvn": "Kuku-Yalanji",
+ "gvo": "Gavião Do Jiparaná",
+ "gvp": "Pará Gavião",
+ "gvr": "Gurung",
+ "gvs": "Gumawana",
+ "gvy": "Guyani",
+ "gwa": "Mbato",
+ "gwb": "Gwa",
+ "gwc": "Gawri; Kalami",
+ "gwd": "Gawwada",
+ "gwe": "Gweno",
+ "gwf": "Gowro",
+ "gwg": "Moo",
+ "gwi": "Gwichʼin",
+ "gwj": "ǀGwi",
+ "gwm": "Awngthim",
+ "gwn": "Gwandara",
+ "gwr": "Gwere",
+ "gwt": "Gawar-Bati",
+ "gwu": "Guwamu",
+ "gww": "Kwini",
+ "gwx": "Gua",
+ "gxx": "Wè Southern",
+ "gya": "Northwest Gbaya",
+ "gyb": "Garus",
+ "gyd": "Kayardild",
+ "gye": "Gyem",
+ "gyf": "Gungabula",
+ "gyg": "Gbayi",
+ "gyi": "Gyele",
+ "gyl": "Gayil",
+ "gym": "Ngäbere",
+ "gyn": "Guyanese Creole English",
+ "gyo": "Gyalsumdo",
+ "gyr": "Guarayu",
+ "gyy": "Gunya",
+ "gyz": "Geji; Gyaazi",
+ "gza": "Ganza",
+ "gzi": "Gazi",
+ "gzn": "Gane",
+ "ha": "Hausa",
+ "haa": "Han",
+ "hab": "Hanoi Sign Language",
+ "hac": "Gurani",
+ "had": "Hatam",
+ "hae": "Eastern Oromo",
+ "haf": "Haiphong Sign Language",
+ "hag": "Hanga",
+ "hah": "Hahon",
+ "hai": "Haida",
+ "haj": "Hajong",
+ "hak": "Hakka Chinese",
+ "hal": "Halang",
+ "ham": "Hewa",
+ "han": "Hangaza",
+ "hao": "Hakö",
+ "hap": "Hupla",
+ "haq": "Ha",
+ "har": "Harari",
+ "has": "Haisla",
+ "hav": "Havu",
+ "haw": "Hawaiian",
+ "hax": "Southern Haida",
+ "hay": "Haya",
+ "haz": "Hazaragi",
+ "hba": "Hamba",
+ "hbb": "Huba",
+ "hbn": "Heiban",
+ "hbo": "Ancient Hebrew",
+ "hbu": "Habu",
+ "hca": "Andaman Creole Hindi",
+ "hch": "Huichol",
+ "hdn": "Northern Haida",
+ "hds": "Honduras Sign Language",
+ "hdy": "Hadiyya",
+ "he": "Hebrew",
+ "hea": "Northern Qiandong Miao",
+ "hed": "Herdé",
+ "heg": "Helong",
+ "heh": "Hehe",
+ "hei": "Heiltsuk",
+ "hem": "Hemba",
+ "hgm": "Haiǁom",
+ "hgw": "Haigwai",
+ "hhi": "Hoia Hoia",
+ "hhr": "Kerak",
+ "hhy": "Hoyahoya",
+ "hi": "Hindi",
+ "hia": "Lamang",
+ "hib": "Hibito",
+ "hid": "Hidatsa",
+ "hif": "Fiji Hindi",
+ "hig": "Kamwe",
+ "hih": "Pamosu",
+ "hii": "Hinduri",
+ "hij": "Hijuk",
+ "hik": "Seit-Kaitetu",
+ "hil": "Hiligaynon",
+ "him": "Himachali languages; Western Pahari languages",
+ "hio": "Tsoa",
+ "hir": "Himarimã",
+ "hit": "Hittite",
+ "hiw": "Hiw",
+ "hix": "Hixkaryána",
+ "hji": "Haji",
+ "hka": "Kahe",
+ "hke": "Hunde",
+ "hkh": "Khah; Poguli",
+ "hkk": "Hunjara-Kaina Ke",
+ "hkn": "Mel-Khaonh",
+ "hks": "Hong Kong Sign Language; Heung Kong Sau Yue",
+ "hla": "Halia",
+ "hlb": "Halbi",
+ "hld": "Halang Doan",
+ "hle": "Hlersu",
+ "hlt": "Matu Chin",
+ "hlu": "Hieroglyphic Luwian",
+ "hma": "Southern Mashan Hmong; Southern Mashan Miao",
+ "hmb": "Humburi Senni Songhay",
+ "hmc": "Central Huishui Hmong; Central Huishui Miao",
+ "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao",
+ "hme": "Eastern Huishui Hmong; Eastern Huishui Miao",
+ "hmf": "Hmong Don",
+ "hmg": "Southwestern Guiyang Hmong",
+ "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao",
+ "hmi": "Northern Huishui Hmong; Northern Huishui Miao",
+ "hmj": "Ge; Gejia",
+ "hmk": "Maek",
+ "hml": "Luopohe Hmong; Luopohe Miao",
+ "hmm": "Central Mashan Hmong; Central Mashan Miao",
+ "hmn": "Hmong; Mong",
+ "hmp": "Northern Mashan Hmong; Northern Mashan Miao",
+ "hmq": "Eastern Qiandong Miao",
+ "hmr": "Hmar",
+ "hms": "Southern Qiandong Miao",
+ "hmt": "Hamtai",
+ "hmu": "Hamap",
+ "hmv": "Hmong Dô",
+ "hmw": "Western Mashan Hmong; Western Mashan Miao",
+ "hmx": "Hmong-Mien languages",
+ "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao",
+ "hmz": "Hmong Shua; Sinicized Miao",
+ "hna": "Mina (Cameroon)",
+ "hnd": "Southern Hindko",
+ "hne": "Chhattisgarhi",
+ "hng": "Hungu",
+ "hnh": "ǁAni",
+ "hni": "Hani",
+ "hnj": "Hmong Njua; Mong Leng; Mong Njua",
+ "hnn": "Hanunoo",
+ "hno": "Northern Hindko",
+ "hns": "Caribbean Hindustani",
+ "hnu": "Hung",
+ "ho": "Hiri Motu",
+ "hoa": "Hoava",
+ "hob": "Mari (Madang Province)",
+ "hoc": "Ho",
+ "hod": "Holma",
+ "hoe": "Horom",
+ "hoh": "Hobyót",
+ "hoi": "Holikachuk",
+ "hoj": "Hadothi; Haroti",
+ "hok": "Hokan languages",
+ "hol": "Holu",
+ "hom": "Homa",
+ "hoo": "Holoholo",
+ "hop": "Hopi",
+ "hor": "Horo",
+ "hos": "Ho Chi Minh City Sign Language",
+ "hot": "Hote; Malê",
+ "hov": "Hovongan",
+ "how": "Honi",
+ "hoy": "Holiya",
+ "hoz": "Hozo",
+ "hpo": "Hpon",
+ "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language",
+ "hr": "Croatian",
+ "hra": "Hrangkhol",
+ "hrc": "Niwer Mil",
+ "hre": "Hre",
+ "hrk": "Haruku",
+ "hrm": "Horned Miao",
+ "hro": "Haroi",
+ "hrp": "Nhirrpi",
+ "hrt": "Hértevin",
+ "hru": "Hruso",
+ "hrw": "Warwar Feni",
+ "hrx": "Hunsrik",
+ "hrz": "Harzani",
+ "hsb": "Upper Sorbian",
+ "hsh": "Hungarian Sign Language",
+ "hsl": "Hausa Sign Language",
+ "hsn": "Xiang Chinese",
+ "hss": "Harsusi",
+ "ht": "Haitian; Haitian Creole",
+ "hti": "Hoti",
+ "hto": "Minica Huitoto",
+ "hts": "Hadza",
+ "htu": "Hitu",
+ "htx": "Middle Hittite",
+ "hu": "Hungarian",
+ "hub": "Huambisa",
+ "huc": "ǂHua; ǂʼAmkhoe",
+ "hud": "Huaulu",
+ "hue": "San Francisco Del Mar Huave",
+ "huf": "Humene",
+ "hug": "Huachipaeri",
+ "huh": "Huilliche",
+ "hui": "Huli",
+ "huj": "Northern Guiyang Hmong; Northern Guiyang Miao",
+ "huk": "Hulung",
+ "hul": "Hula",
+ "hum": "Hungana",
+ "huo": "Hu",
+ "hup": "Hupa",
+ "huq": "Tsat",
+ "hur": "Halkomelem",
+ "hus": "Huastec",
+ "hut": "Humla",
+ "huu": "Murui Huitoto",
+ "huv": "San Mateo Del Mar Huave",
+ "huw": "Hukumina",
+ "hux": "Nüpode Huitoto",
+ "huy": "Hulaulá",
+ "huz": "Hunzib",
+ "hvc": "Haitian Vodoun Culture Language",
+ "hve": "San Dionisio Del Mar Huave",
+ "hvk": "Haveke",
+ "hvn": "Sabu",
+ "hvv": "Santa María Del Mar Huave",
+ "hwa": "Wané",
+ "hwc": "Hawai'i Creole English; Hawai'i Pidgin",
+ "hwo": "Hwana",
+ "hy": "Armenian",
+ "hya": "Hya",
+ "hyw": "Western Armenian",
+ "hyx": "Armenian (family)",
+ "hz": "Herero",
+ "ia": "Interlingua (International Auxiliary Language Association)",
+ "iai": "Iaai",
+ "ian": "Iatmul",
+ "iar": "Purari",
+ "iba": "Iban",
+ "ibb": "Ibibio",
+ "ibd": "Iwaidja",
+ "ibe": "Akpes",
+ "ibg": "Ibanag",
+ "ibh": "Bih",
+ "ibl": "Ibaloi",
+ "ibm": "Agoi",
+ "ibn": "Ibino",
+ "ibr": "Ibuoro",
+ "ibu": "Ibu",
+ "iby": "Ibani",
+ "ica": "Ede Ica",
+ "ich": "Etkywan",
+ "icl": "Icelandic Sign Language",
+ "icr": "Islander Creole English",
+ "id": "Indonesian",
+ "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi",
+ "idb": "Indo-Portuguese",
+ "idc": "Idon; Ajiya",
+ "idd": "Ede Idaca",
+ "ide": "Idere",
+ "idi": "Idi",
+ "idr": "Indri",
+ "ids": "Idesa",
+ "idt": "Idaté",
+ "idu": "Idoma",
+ "ie": "Interlingue; Occidental",
+ "ifa": "Amganad Ifugao",
+ "ifb": "Batad Ifugao; Ayangan Ifugao",
+ "ife": "Ifè",
+ "iff": "Ifo",
+ "ifk": "Tuwali Ifugao",
+ "ifm": "Teke-Fuumu",
+ "ifu": "Mayoyao Ifugao",
+ "ify": "Keley-I Kallahan",
+ "ig": "Igbo",
+ "igb": "Ebira",
+ "ige": "Igede",
+ "igg": "Igana",
+ "igl": "Igala",
+ "igm": "Kanggape",
+ "ign": "Ignaciano",
+ "igo": "Isebe",
+ "igs": "Interglossa",
+ "igw": "Igwe",
+ "ihb": "Iha Based Pidgin",
+ "ihi": "Ihievbe",
+ "ihp": "Iha",
+ "ihw": "Bidhawal",
+ "ii": "Sichuan Yi; Nuosu",
+ "iin": "Thiin",
+ "iir": "Indo-Iranian languages",
+ "ijc": "Izon",
+ "ije": "Biseni",
+ "ijj": "Ede Ije",
+ "ijn": "Kalabari",
+ "ijo": "Ijo languages",
+ "ijs": "Southeast Ijo",
+ "ik": "Inupiaq",
+ "ike": "Eastern Canadian Inuktitut",
+ "iki": "Iko",
+ "ikk": "Ika",
+ "ikl": "Ikulu",
+ "iko": "Olulumo-Ikom",
+ "ikp": "Ikpeshi",
+ "ikr": "Ikaranggal",
+ "iks": "Inuit Sign Language",
+ "ikt": "Inuinnaqtun; Western Canadian Inuktitut",
+ "ikv": "Iku-Gora-Ankwa",
+ "ikw": "Ikwere",
+ "ikx": "Ik",
+ "ikz": "Ikizu",
+ "ila": "Ile Ape",
+ "ilb": "Ila",
+ "ilg": "Garig-Ilgar",
+ "ili": "Ili Turki",
+ "ilk": "Ilongot",
+ "ilm": "Iranun (Malaysia)",
+ "ilo": "Iloko",
+ "ilp": "Iranun (Philippines)",
+ "ils": "International Sign",
+ "ilu": "Ili'uun",
+ "ilv": "Ilue",
+ "ima": "Mala Malasar",
+ "imi": "Anamgura",
+ "iml": "Miluk",
+ "imn": "Imonda",
+ "imo": "Imbongu",
+ "imr": "Imroing",
+ "ims": "Marsian",
+ "imt": "Imotong",
+ "imy": "Milyan",
+ "inb": "Inga",
+ "inc": "Indic languages",
+ "ine": "Indo-European languages",
+ "ing": "Degexit'an",
+ "inh": "Ingush",
+ "inj": "Jungle Inga",
+ "inl": "Indonesian Sign Language",
+ "inm": "Minaean",
+ "inn": "Isinai",
+ "ino": "Inoke-Yate",
+ "inp": "Iñapari",
+ "ins": "Indian Sign Language",
+ "int": "Intha",
+ "inz": "Ineseño",
+ "io": "Ido",
+ "ior": "Inor",
+ "iou": "Tuma-Irumu",
+ "iow": "Iowa-Oto",
+ "ipi": "Ipili",
+ "ipo": "Ipiko",
+ "iqu": "Iquito",
+ "iqw": "Ikwo",
+ "ira": "Iranian languages",
+ "ire": "Iresim",
+ "irh": "Irarutu",
+ "iri": "Rigwe; Irigwe",
+ "irk": "Iraqw",
+ "irn": "Irántxe",
+ "iro": "Iroquoian languages",
+ "irr": "Ir",
+ "iru": "Irula",
+ "irx": "Kamberau",
+ "iry": "Iraya",
+ "is": "Icelandic",
+ "isa": "Isabi",
+ "isc": "Isconahua",
+ "isd": "Isnag",
+ "ise": "Italian Sign Language",
+ "isg": "Irish Sign Language",
+ "ish": "Esan",
+ "isi": "Nkem-Nkum",
+ "isk": "Ishkashimi",
+ "ism": "Masimasi",
+ "isn": "Isanzu",
+ "iso": "Isoko",
+ "isr": "Israeli Sign Language",
+ "ist": "Istriot",
+ "isu": "Isu (Menchum Division)",
+ "it": "Italian",
+ "itb": "Binongan Itneg",
+ "itc": "Italic languages",
+ "itd": "Southern Tidung",
+ "ite": "Itene",
+ "iti": "Inlaod Itneg",
+ "itk": "Judeo-Italian",
+ "itl": "Itelmen",
+ "itm": "Itu Mbon Uzo",
+ "ito": "Itonama",
+ "itr": "Iteri",
+ "its": "Isekiri",
+ "itt": "Maeng Itneg",
+ "itv": "Itawit",
+ "itw": "Ito",
+ "itx": "Itik",
+ "ity": "Moyadan Itneg",
+ "itz": "Itzá",
+ "iu": "Inuktitut",
+ "ium": "Iu Mien",
+ "ivb": "Ibatan",
+ "ivv": "Ivatan",
+ "iwk": "I-Wak",
+ "iwm": "Iwam",
+ "iwo": "Iwur",
+ "iws": "Sepik Iwam",
+ "ixc": "Ixcatec",
+ "ixl": "Ixil",
+ "iya": "Iyayu",
+ "iyo": "Mesaka",
+ "iyx": "Yaka (Congo)",
+ "izh": "Ingrian",
+ "izr": "Izere",
+ "izz": "Izii",
+ "ja": "Japanese",
+ "jaa": "Jamamadí",
+ "jab": "Hyam",
+ "jac": "Popti'; Jakalteko",
+ "jad": "Jahanka",
+ "jae": "Yabem",
+ "jaf": "Jara",
+ "jah": "Jah Hut",
+ "jaj": "Zazao",
+ "jak": "Jakun",
+ "jal": "Yalahatan",
+ "jam": "Jamaican Creole English",
+ "jan": "Jandai",
+ "jao": "Yanyuwa",
+ "jaq": "Yaqay",
+ "jas": "New Caledonian Javanese",
+ "jat": "Jakati",
+ "jau": "Yaur",
+ "jax": "Jambi Malay",
+ "jay": "Yan-nhangu; Nhangu",
+ "jaz": "Jawe",
+ "jbe": "Judeo-Berber",
+ "jbi": "Badjiri",
+ "jbj": "Arandai",
+ "jbk": "Barikewa",
+ "jbm": "Bijim",
+ "jbn": "Nafusi",
+ "jbo": "Lojban",
+ "jbr": "Jofotek-Bromnya",
+ "jbt": "Jabutí",
+ "jbu": "Jukun Takum",
+ "jbw": "Yawijibaya",
+ "jcs": "Jamaican Country Sign Language",
+ "jct": "Krymchak",
+ "jda": "Jad",
+ "jdg": "Jadgali",
+ "jdt": "Judeo-Tat",
+ "jeb": "Jebero",
+ "jee": "Jerung",
+ "jeh": "Jeh",
+ "jei": "Yei",
+ "jek": "Jeri Kuo",
+ "jel": "Yelmek",
+ "jen": "Dza",
+ "jer": "Jere",
+ "jet": "Manem",
+ "jeu": "Jonkor Bourmataguil",
+ "jgb": "Ngbee",
+ "jge": "Judeo-Georgian",
+ "jgk": "Gwak",
+ "jgo": "Ngomba",
+ "jhi": "Jehai",
+ "jhs": "Jhankot Sign Language",
+ "jia": "Jina",
+ "jib": "Jibu",
+ "jic": "Tol",
+ "jid": "Bu (Kaduna State)",
+ "jie": "Jilbe",
+ "jig": "Jingulu; Djingili",
+ "jih": "sTodsde; Shangzhai",
+ "jii": "Jiiddu",
+ "jil": "Jilim",
+ "jim": "Jimi (Cameroon)",
+ "jio": "Jiamao",
+ "jiq": "Guanyinqiao; Lavrung",
+ "jit": "Jita",
+ "jiu": "Youle Jinuo",
+ "jiv": "Shuar",
+ "jiy": "Buyuan Jinuo",
+ "jje": "Jejueo",
+ "jjr": "Bankal",
+ "jka": "Kaera",
+ "jkm": "Mobwa Karen",
+ "jko": "Kubo",
+ "jkp": "Paku Karen",
+ "jkr": "Koro (India)",
+ "jks": "Amami Koniya Sign Language",
+ "jku": "Labir",
+ "jle": "Ngile",
+ "jls": "Jamaican Sign Language",
+ "jma": "Dima",
+ "jmb": "Zumbun",
+ "jmc": "Machame",
+ "jmd": "Yamdena",
+ "jmi": "Jimi (Nigeria)",
+ "jml": "Jumli",
+ "jmn": "Makuri Naga",
+ "jmr": "Kamara",
+ "jms": "Mashi (Nigeria)",
+ "jmw": "Mouwase",
+ "jmx": "Western Juxtlahuaca Mixtec",
+ "jna": "Jangshung",
+ "jnd": "Jandavra",
+ "jng": "Yangman",
+ "jni": "Janji",
+ "jnj": "Yemsa",
+ "jnl": "Rawat",
+ "jns": "Jaunsari",
+ "job": "Joba",
+ "jod": "Wojenaka",
+ "jog": "Jogi",
+ "jor": "Jorá",
+ "jos": "Jordanian Sign Language",
+ "jow": "Jowulu",
+ "jpa": "Jewish Palestinian Aramaic",
+ "jpr": "Judeo-Persian",
+ "jpx": "Japanese (family)",
+ "jqr": "Jaqaru",
+ "jra": "Jarai",
+ "jrb": "Judeo-Arabic",
+ "jrr": "Jiru",
+ "jrt": "Jakattoe",
+ "jru": "Japrería",
+ "jsl": "Japanese Sign Language",
+ "jua": "Júma",
+ "jub": "Wannu",
+ "juc": "Jurchen",
+ "jud": "Worodougou",
+ "juh": "Hõne",
+ "jui": "Ngadjuri",
+ "juk": "Wapan",
+ "jul": "Jirel",
+ "jum": "Jumjum",
+ "jun": "Juang",
+ "juo": "Jiba",
+ "jup": "Hupdë",
+ "jur": "Jurúna",
+ "jus": "Jumla Sign Language",
+ "jut": "Jutish",
+ "juu": "Ju",
+ "juw": "Wãpha",
+ "juy": "Juray",
+ "jv": "Javanese",
+ "jvd": "Javindo",
+ "jvn": "Caribbean Javanese",
+ "jwi": "Jwira-Pepesa",
+ "jya": "Jiarong",
+ "jye": "Judeo-Yemeni Arabic",
+ "jyy": "Jaya",
+ "ka": "Georgian",
+ "kaa": "Kara-Kalpak; Karakalpak",
+ "kab": "Kabyle",
+ "kac": "Kachin; Jingpho",
+ "kad": "Adara",
+ "kae": "Ketangalan",
+ "kaf": "Katso",
+ "kag": "Kajaman",
+ "kah": "Kara (Central African Republic)",
+ "kai": "Karekare",
+ "kaj": "Jju",
+ "kak": "Kalanguya; Kayapa Kallahan",
+ "kam": "Kamba (Kenya)",
+ "kao": "Xaasongaxango",
+ "kap": "Bezhta",
+ "kaq": "Capanahua",
+ "kar": "Karen languages",
+ "kav": "Katukína",
+ "kaw": "Kawi",
+ "kax": "Kao",
+ "kay": "Kamayurá",
+ "kba": "Kalarko",
+ "kbb": "Kaxuiâna",
+ "kbc": "Kadiwéu",
+ "kbd": "Kabardian",
+ "kbe": "Kanju",
+ "kbg": "Khamba",
+ "kbh": "Camsá",
+ "kbi": "Kaptiau",
+ "kbj": "Kari",
+ "kbk": "Grass Koiari",
+ "kbl": "Kanembu",
+ "kbm": "Iwal",
+ "kbn": "Kare (Central African Republic)",
+ "kbo": "Keliko",
+ "kbp": "Kabiyè",
+ "kbq": "Kamano",
+ "kbr": "Kafa",
+ "kbs": "Kande",
+ "kbt": "Abadi",
+ "kbu": "Kabutra",
+ "kbv": "Dera (Indonesia)",
+ "kbw": "Kaiep",
+ "kbx": "Ap Ma",
+ "kby": "Manga Kanuri",
+ "kbz": "Duhwa",
+ "kca": "Khanty",
+ "kcb": "Kawacha",
+ "kcc": "Lubila",
+ "kcd": "Ngkâlmpw Kanum",
+ "kce": "Kaivi",
+ "kcf": "Ukaan",
+ "kcg": "Tyap",
+ "kch": "Vono",
+ "kci": "Kamantan",
+ "kcj": "Kobiana",
+ "kck": "Kalanga",
+ "kcl": "Kela (Papua New Guinea); Kala",
+ "kcm": "Gula (Central African Republic)",
+ "kcn": "Nubi",
+ "kco": "Kinalakna",
+ "kcp": "Kanga",
+ "kcq": "Kamo",
+ "kcr": "Katla",
+ "kcs": "Koenoem",
+ "kct": "Kaian",
+ "kcu": "Kami (Tanzania)",
+ "kcv": "Kete",
+ "kcw": "Kabwari",
+ "kcx": "Kachama-Ganjule",
+ "kcy": "Korandje",
+ "kcz": "Konongo",
+ "kda": "Worimi",
+ "kdc": "Kutu",
+ "kdd": "Yankunytjatjara",
+ "kde": "Makonde",
+ "kdf": "Mamusi",
+ "kdg": "Seba",
+ "kdh": "Tem",
+ "kdi": "Kumam",
+ "kdj": "Karamojong",
+ "kdk": "Numèè; Kwényi",
+ "kdl": "Tsikimba",
+ "kdm": "Kagoma",
+ "kdn": "Kunda",
+ "kdo": "Kordofanian languages",
+ "kdp": "Kaningdon-Nindem",
+ "kdq": "Koch",
+ "kdr": "Karaim",
+ "kdt": "Kuy",
+ "kdu": "Kadaru",
+ "kdw": "Koneraw",
+ "kdx": "Kam",
+ "kdy": "Keder; Keijar",
+ "kdz": "Kwaja",
+ "kea": "Kabuverdianu",
+ "keb": "Kélé",
+ "kec": "Keiga",
+ "ked": "Kerewe",
+ "kee": "Eastern Keres",
+ "kef": "Kpessi",
+ "keg": "Tese",
+ "keh": "Keak",
+ "kei": "Kei",
+ "kej": "Kadar",
+ "kek": "Kekchí",
+ "kel": "Kela (Democratic Republic of Congo)",
+ "kem": "Kemak",
+ "ken": "Kenyang",
+ "keo": "Kakwa",
+ "kep": "Kaikadi",
+ "keq": "Kamar",
+ "ker": "Kera",
+ "kes": "Kugbo",
+ "ket": "Ket",
+ "keu": "Akebu",
+ "kev": "Kanikkaran",
+ "kew": "West Kewa",
+ "kex": "Kukna",
+ "key": "Kupia",
+ "kez": "Kukele",
+ "kfa": "Kodava",
+ "kfb": "Northwestern Kolami",
+ "kfc": "Konda-Dora",
+ "kfd": "Korra Koraga",
+ "kfe": "Kota (India)",
+ "kff": "Koya",
+ "kfg": "Kudiya",
+ "kfh": "Kurichiya",
+ "kfi": "Kannada Kurumba",
+ "kfj": "Kemiehua",
+ "kfk": "Kinnauri",
+ "kfl": "Kung",
+ "kfm": "Khunsari",
+ "kfn": "Kuk",
+ "kfo": "Koro (Côte d'Ivoire)",
+ "kfp": "Korwa",
+ "kfq": "Korku",
+ "kfr": "Kachhi; Kutchi",
+ "kfs": "Bilaspuri",
+ "kft": "Kanjari",
+ "kfu": "Katkari",
+ "kfv": "Kurmukar",
+ "kfw": "Kharam Naga",
+ "kfx": "Kullu Pahari",
+ "kfy": "Kumaoni",
+ "kfz": "Koromfé",
+ "kg": "Kongo",
+ "kga": "Koyaga",
+ "kgb": "Kawe",
+ "kge": "Komering",
+ "kgf": "Kube",
+ "kgg": "Kusunda",
+ "kgi": "Selangor Sign Language",
+ "kgj": "Gamale Kham",
+ "kgk": "Kaiwá",
+ "kgl": "Kunggari",
+ "kgm": "Karipúna",
+ "kgn": "Karingani",
+ "kgo": "Krongo",
+ "kgp": "Kaingang",
+ "kgq": "Kamoro",
+ "kgr": "Abun",
+ "kgs": "Kumbainggar",
+ "kgt": "Somyev",
+ "kgu": "Kobol",
+ "kgv": "Karas",
+ "kgw": "Karon Dori",
+ "kgx": "Kamaru",
+ "kgy": "Kyerung",
+ "kha": "Khasi",
+ "khb": "Lü",
+ "khc": "Tukang Besi North",
+ "khd": "Bädi Kanum",
+ "khe": "Korowai",
+ "khf": "Khuen",
+ "khg": "Khams Tibetan",
+ "khh": "Kehu",
+ "khi": "Khoisan languages",
+ "khj": "Kuturmi",
+ "khk": "Halh Mongolian",
+ "khl": "Lusi",
+ "khn": "Khandesi",
+ "kho": "Khotanese; Sakan",
+ "khp": "Kapori; Kapauri",
+ "khq": "Koyra Chiini Songhay",
+ "khr": "Kharia",
+ "khs": "Kasua",
+ "kht": "Khamti",
+ "khu": "Nkhumbi",
+ "khv": "Khvarshi",
+ "khw": "Khowar",
+ "khx": "Kanu",
+ "khy": "Kele (Democratic Republic of Congo)",
+ "khz": "Keapara",
+ "ki": "Kikuyu; Gikuyu",
+ "kia": "Kim",
+ "kib": "Koalib",
+ "kic": "Kickapoo",
+ "kid": "Koshin",
+ "kie": "Kibet",
+ "kif": "Eastern Parbate Kham",
+ "kig": "Kimaama; Kimaghima",
+ "kih": "Kilmeri",
+ "kii": "Kitsai",
+ "kij": "Kilivila",
+ "kil": "Kariya",
+ "kim": "Karagas",
+ "kio": "Kiowa",
+ "kip": "Sheshi Kham",
+ "kiq": "Kosadle; Kosare",
+ "kis": "Kis",
+ "kit": "Agob",
+ "kiu": "Kirmanjki (individual language)",
+ "kiv": "Kimbu",
+ "kiw": "Northeast Kiwai",
+ "kix": "Khiamniungan Naga",
+ "kiy": "Kirikiri",
+ "kiz": "Kisi",
+ "kj": "Kuanyama; Kwanyama",
+ "kja": "Mlap",
+ "kjb": "Q'anjob'al; Kanjobal",
+ "kjc": "Coastal Konjo",
+ "kjd": "Southern Kiwai",
+ "kje": "Kisar",
+ "kjg": "Khmu",
+ "kjh": "Khakas",
+ "kji": "Zabana",
+ "kjj": "Khinalugh",
+ "kjk": "Highland Konjo",
+ "kjl": "Western Parbate Kham",
+ "kjm": "Kháng",
+ "kjn": "Kunjen",
+ "kjo": "Harijan Kinnauri",
+ "kjp": "Pwo Eastern Karen",
+ "kjq": "Western Keres",
+ "kjr": "Kurudu",
+ "kjs": "East Kewa",
+ "kjt": "Phrae Pwo Karen",
+ "kju": "Kashaya",
+ "kjv": "Kaikavian Literary Language",
+ "kjx": "Ramopa",
+ "kjy": "Erave",
+ "kjz": "Bumthangkha",
+ "kk": "Kazakh",
+ "kka": "Kakanda",
+ "kkb": "Kwerisa",
+ "kkc": "Odoodee",
+ "kkd": "Kinuku",
+ "kke": "Kakabe",
+ "kkf": "Kalaktang Monpa",
+ "kkg": "Mabaka Valley Kalinga",
+ "kkh": "Khün",
+ "kki": "Kagulu",
+ "kkj": "Kako",
+ "kkk": "Kokota",
+ "kkl": "Kosarek Yale",
+ "kkm": "Kiong",
+ "kkn": "Kon Keu",
+ "kko": "Karko",
+ "kkp": "Gugubera; Koko-Bera",
+ "kkq": "Kaeku",
+ "kkr": "Kir-Balar",
+ "kks": "Giiwo",
+ "kkt": "Koi",
+ "kku": "Tumi",
+ "kkv": "Kangean",
+ "kkw": "Teke-Kukuya",
+ "kkx": "Kohin",
+ "kky": "Guugu Yimidhirr; Guguyimidjir",
+ "kkz": "Kaska",
+ "kl": "Kalaallisut; Greenlandic",
+ "kla": "Klamath-Modoc",
+ "klb": "Kiliwa",
+ "klc": "Kolbila",
+ "kld": "Gamilaraay",
+ "kle": "Kulung (Nepal)",
+ "klf": "Kendeje",
+ "klg": "Tagakaulo",
+ "klh": "Weliki",
+ "kli": "Kalumpang",
+ "klj": "Khalaj",
+ "klk": "Kono (Nigeria)",
+ "kll": "Kagan Kalagan",
+ "klm": "Migum",
+ "kln": "Kalenjin",
+ "klo": "Kapya",
+ "klp": "Kamasa",
+ "klq": "Rumu",
+ "klr": "Khaling",
+ "kls": "Kalasha",
+ "klt": "Nukna",
+ "klu": "Klao",
+ "klv": "Maskelynes",
+ "klw": "Tado; Lindu",
+ "klx": "Koluwawa",
+ "kly": "Kalao",
+ "klz": "Kabola",
+ "km": "Khmer; Central Khmer",
+ "kma": "Konni",
+ "kmb": "Kimbundu",
+ "kmc": "Southern Dong",
+ "kmd": "Majukayang Kalinga",
+ "kme": "Bakole",
+ "kmf": "Kare (Papua New Guinea)",
+ "kmg": "Kâte",
+ "kmh": "Kalam",
+ "kmi": "Kami (Nigeria)",
+ "kmj": "Kumarbhag Paharia",
+ "kmk": "Limos Kalinga",
+ "kml": "Tanudan Kalinga",
+ "kmm": "Kom (India)",
+ "kmn": "Awtuw",
+ "kmo": "Kwoma",
+ "kmp": "Gimme",
+ "kmq": "Kwama",
+ "kmr": "Northern Kurdish",
+ "kms": "Kamasau",
+ "kmt": "Kemtuik",
+ "kmu": "Kanite",
+ "kmv": "Karipúna Creole French",
+ "kmw": "Komo (Democratic Republic of Congo)",
+ "kmx": "Waboda",
+ "kmy": "Koma",
+ "kmz": "Khorasani Turkish",
+ "kn": "Kannada",
+ "kna": "Dera (Nigeria)",
+ "knb": "Lubuagan Kalinga",
+ "knc": "Central Kanuri",
+ "knd": "Konda",
+ "kne": "Kankanaey",
+ "knf": "Mankanya",
+ "kng": "Koongo",
+ "kni": "Kanufi",
+ "knj": "Western Kanjobal",
+ "knk": "Kuranko",
+ "knl": "Keninjal",
+ "knm": "Kanamarí",
+ "knn": "Konkani (individual language)",
+ "kno": "Kono (Sierra Leone)",
+ "knp": "Kwanja",
+ "knq": "Kintaq",
+ "knr": "Kaningra",
+ "kns": "Kensiu",
+ "knt": "Panoan Katukína",
+ "knu": "Kono (Guinea)",
+ "knv": "Tabo",
+ "knw": "Kung-Ekoka",
+ "knx": "Kendayan; Salako",
+ "kny": "Kanyok",
+ "knz": "Kalamsé",
+ "ko": "Korean",
+ "koa": "Konomala",
+ "koc": "Kpati",
+ "kod": "Kodi",
+ "koe": "Kacipo-Bale Suri",
+ "kof": "Kubi",
+ "kog": "Cogui; Kogi",
+ "koh": "Koyo",
+ "koi": "Komi-Permyak",
+ "kok": "Konkani (macrolanguage)",
+ "kol": "Kol (Papua New Guinea)",
+ "koo": "Konzo",
+ "kop": "Waube",
+ "koq": "Kota (Gabon)",
+ "kos": "Kosraean",
+ "kot": "Lagwan",
+ "kou": "Koke",
+ "kov": "Kudu-Camo",
+ "kow": "Kugama",
+ "koy": "Koyukon",
+ "koz": "Korak",
+ "kpa": "Kutto",
+ "kpb": "Mullu Kurumba",
+ "kpc": "Curripaco",
+ "kpd": "Koba",
+ "kpe": "Kpelle",
+ "kpf": "Komba",
+ "kpg": "Kapingamarangi",
+ "kph": "Kplang",
+ "kpi": "Kofei",
+ "kpj": "Karajá",
+ "kpk": "Kpan",
+ "kpl": "Kpala",
+ "kpm": "Koho",
+ "kpn": "Kepkiriwát",
+ "kpo": "Ikposo",
+ "kpq": "Korupun-Sela",
+ "kpr": "Korafe-Yegha",
+ "kps": "Tehit",
+ "kpt": "Karata",
+ "kpu": "Kafoa",
+ "kpv": "Komi-Zyrian",
+ "kpw": "Kobon",
+ "kpx": "Mountain Koiali",
+ "kpy": "Koryak",
+ "kpz": "Kupsabiny",
+ "kqa": "Mum",
+ "kqb": "Kovai",
+ "kqc": "Doromu-Koki",
+ "kqd": "Koy Sanjaq Surat",
+ "kqe": "Kalagan",
+ "kqf": "Kakabai",
+ "kqg": "Khe",
+ "kqh": "Kisankasa",
+ "kqi": "Koitabu",
+ "kqj": "Koromira",
+ "kqk": "Kotafon Gbe",
+ "kql": "Kyenele",
+ "kqm": "Khisa",
+ "kqn": "Kaonde",
+ "kqo": "Eastern Krahn",
+ "kqp": "Kimré",
+ "kqq": "Krenak",
+ "kqr": "Kimaragang",
+ "kqs": "Northern Kissi",
+ "kqt": "Klias River Kadazan",
+ "kqu": "Seroa",
+ "kqv": "Okolod",
+ "kqw": "Kandas",
+ "kqx": "Mser",
+ "kqy": "Koorete",
+ "kqz": "Korana",
+ "kr": "Kanuri",
+ "kra": "Kumhali",
+ "krb": "Karkin",
+ "krc": "Karachay-Balkar",
+ "krd": "Kairui-Midiki",
+ "kre": "Panará",
+ "krf": "Koro (Vanuatu)",
+ "krh": "Kurama",
+ "kri": "Krio",
+ "krj": "Kinaray-A",
+ "krk": "Kerek",
+ "krl": "Karelian",
+ "krn": "Sapo",
+ "kro": "Kru languages",
+ "krp": "Korop",
+ "krr": "Krung",
+ "krs": "Gbaya (Sudan)",
+ "krt": "Tumari Kanuri",
+ "kru": "Kurukh",
+ "krv": "Kavet",
+ "krw": "Western Krahn",
+ "krx": "Karon",
+ "kry": "Kryts",
+ "krz": "Sota Kanum",
+ "ks": "Kashmiri",
+ "ksa": "Shuwa-Zamani",
+ "ksb": "Shambala",
+ "ksc": "Southern Kalinga",
+ "ksd": "Kuanua",
+ "kse": "Kuni",
+ "ksf": "Bafia",
+ "ksg": "Kusaghe",
+ "ksh": "Kölsch",
+ "ksi": "Krisa; I'saka",
+ "ksj": "Uare",
+ "ksk": "Kansa",
+ "ksl": "Kumalu",
+ "ksm": "Kumba",
+ "ksn": "Kasiguranin",
+ "kso": "Kofa",
+ "ksp": "Kaba",
+ "ksq": "Kwaami",
+ "ksr": "Borong",
+ "kss": "Southern Kisi",
+ "kst": "Winyé",
+ "ksu": "Khamyang",
+ "ksv": "Kusu",
+ "ksw": "S'gaw Karen",
+ "ksx": "Kedang",
+ "ksy": "Kharia Thar",
+ "ksz": "Kodaku",
+ "kta": "Katua",
+ "ktb": "Kambaata",
+ "ktc": "Kholok",
+ "ktd": "Kokata; Kukatha",
+ "kte": "Nubri",
+ "ktf": "Kwami",
+ "ktg": "Kalkutung",
+ "kth": "Karanga",
+ "kti": "North Muyu",
+ "ktj": "Plapo Krumen",
+ "ktk": "Kaniet",
+ "ktl": "Koroshi",
+ "ktm": "Kurti",
+ "ktn": "Karitiâna",
+ "kto": "Kuot",
+ "ktp": "Kaduo",
+ "ktq": "Katabaga",
+ "kts": "South Muyu",
+ "ktt": "Ketum",
+ "ktu": "Kituba (Democratic Republic of Congo)",
+ "ktv": "Eastern Katu",
+ "ktw": "Kato",
+ "ktx": "Kaxararí",
+ "kty": "Kango (Bas-Uélé District)",
+ "ktz": "Juǀʼhoan; Juǀʼhoansi",
+ "ku": "Kurdish",
+ "kub": "Kutep",
+ "kuc": "Kwinsu",
+ "kud": "'Auhelawa",
+ "kue": "Kuman (Papua New Guinea)",
+ "kuf": "Western Katu",
+ "kug": "Kupa",
+ "kuh": "Kushi",
+ "kui": "Kuikúro-Kalapálo; Kalapalo",
+ "kuj": "Kuria",
+ "kuk": "Kepo'",
+ "kul": "Kulere",
+ "kum": "Kumyk",
+ "kun": "Kunama",
+ "kuo": "Kumukio",
+ "kup": "Kunimaipa",
+ "kuq": "Karipuna",
+ "kus": "Kusaal",
+ "kut": "Kutenai",
+ "kuu": "Upper Kuskokwim",
+ "kuv": "Kur",
+ "kuw": "Kpagua",
+ "kux": "Kukatja",
+ "kuy": "Kuuku-Ya'u",
+ "kuz": "Kunza",
+ "kv": "Komi",
+ "kva": "Bagvalal",
+ "kvb": "Kubu",
+ "kvc": "Kove",
+ "kvd": "Kui (Indonesia)",
+ "kve": "Kalabakan",
+ "kvf": "Kabalai",
+ "kvg": "Kuni-Boazi",
+ "kvh": "Komodo",
+ "kvi": "Kwang",
+ "kvj": "Psikye",
+ "kvk": "Korean Sign Language",
+ "kvl": "Kayaw",
+ "kvm": "Kendem",
+ "kvn": "Border Kuna",
+ "kvo": "Dobel",
+ "kvp": "Kompane",
+ "kvq": "Geba Karen",
+ "kvr": "Kerinci",
+ "kvt": "Lahta Karen; Lahta",
+ "kvu": "Yinbaw Karen",
+ "kvv": "Kola",
+ "kvw": "Wersing",
+ "kvx": "Parkari Koli",
+ "kvy": "Yintale Karen; Yintale",
+ "kvz": "Tsakwambo; Tsaukambo",
+ "kw": "Cornish",
+ "kwa": "Dâw",
+ "kwb": "Kwa",
+ "kwc": "Likwala",
+ "kwd": "Kwaio",
+ "kwe": "Kwerba",
+ "kwf": "Kwara'ae",
+ "kwg": "Sara Kaba Deme",
+ "kwh": "Kowiai",
+ "kwi": "Awa-Cuaiquer",
+ "kwj": "Kwanga",
+ "kwk": "Kwakiutl",
+ "kwl": "Kofyar",
+ "kwm": "Kwambi",
+ "kwn": "Kwangali",
+ "kwo": "Kwomtari",
+ "kwp": "Kodia",
+ "kwr": "Kwer",
+ "kws": "Kwese",
+ "kwt": "Kwesten",
+ "kwu": "Kwakum",
+ "kwv": "Sara Kaba Náà",
+ "kww": "Kwinti",
+ "kwx": "Khirwar",
+ "kwy": "San Salvador Kongo",
+ "kwz": "Kwadi",
+ "kxa": "Kairiru",
+ "kxb": "Krobu",
+ "kxc": "Konso; Khonso",
+ "kxd": "Brunei",
+ "kxf": "Manumanaw Karen; Manumanaw",
+ "kxh": "Karo (Ethiopia)",
+ "kxi": "Keningau Murut",
+ "kxj": "Kulfa",
+ "kxk": "Zayein Karen",
+ "kxm": "Northern Khmer",
+ "kxn": "Kanowit-Tanjong Melanau",
+ "kxo": "Kanoé",
+ "kxp": "Wadiyara Koli",
+ "kxq": "Smärky Kanum",
+ "kxr": "Koro (Papua New Guinea)",
+ "kxs": "Kangjia",
+ "kxt": "Koiwat",
+ "kxv": "Kuvi",
+ "kxw": "Konai",
+ "kxx": "Likuba",
+ "kxy": "Kayong",
+ "kxz": "Kerewo",
+ "ky": "Kirghiz; Kyrgyz",
+ "kya": "Kwaya",
+ "kyb": "Butbut Kalinga",
+ "kyc": "Kyaka",
+ "kyd": "Karey",
+ "kye": "Krache",
+ "kyf": "Kouya",
+ "kyg": "Keyagana",
+ "kyh": "Karok",
+ "kyi": "Kiput",
+ "kyj": "Karao",
+ "kyk": "Kamayo",
+ "kyl": "Kalapuya",
+ "kym": "Kpatili",
+ "kyn": "Northern Binukidnon",
+ "kyo": "Kelon",
+ "kyp": "Kang",
+ "kyq": "Kenga",
+ "kyr": "Kuruáya",
+ "kys": "Baram Kayan",
+ "kyt": "Kayagar",
+ "kyu": "Western Kayah",
+ "kyv": "Kayort",
+ "kyw": "Kudmali",
+ "kyx": "Rapoisi",
+ "kyy": "Kambaira",
+ "kyz": "Kayabí",
+ "kza": "Western Karaboro",
+ "kzb": "Kaibobo",
+ "kzc": "Bondoukou Kulango",
+ "kzd": "Kadai",
+ "kze": "Kosena",
+ "kzf": "Da'a Kaili",
+ "kzg": "Kikai",
+ "kzi": "Kelabit",
+ "kzk": "Kazukuru",
+ "kzl": "Kayeli",
+ "kzm": "Kais",
+ "kzn": "Kokola",
+ "kzo": "Kaningi",
+ "kzp": "Kaidipang",
+ "kzq": "Kaike",
+ "kzr": "Karang",
+ "kzs": "Sugut Dusun",
+ "kzu": "Kayupulau",
+ "kzv": "Komyandaret",
+ "kzw": "Karirí-Xocó",
+ "kzx": "Kamarian",
+ "kzy": "Kango (Tshopo District)",
+ "kzz": "Kalabra",
+ "la": "Latin",
+ "laa": "Southern Subanen",
+ "lab": "Linear A",
+ "lac": "Lacandon",
+ "lad": "Ladino",
+ "lae": "Pattani",
+ "laf": "Lafofa",
+ "lag": "Langi",
+ "lah": "Lahnda",
+ "lai": "Lambya",
+ "laj": "Lango (Uganda)",
+ "lal": "Lalia",
+ "lam": "Lamba",
+ "lan": "Laru",
+ "lap": "Laka (Chad)",
+ "laq": "Qabiao",
+ "lar": "Larteh",
+ "las": "Lama (Togo)",
+ "lau": "Laba",
+ "law": "Lauje",
+ "lax": "Tiwa",
+ "lay": "Lama Bai",
+ "laz": "Aribwatsa",
+ "lb": "Luxembourgish; Letzeburgesch",
+ "lbb": "Label",
+ "lbc": "Lakkia",
+ "lbe": "Lak",
+ "lbf": "Tinani",
+ "lbg": "Laopang",
+ "lbi": "La'bi",
+ "lbj": "Ladakhi",
+ "lbk": "Central Bontok",
+ "lbl": "Libon Bikol",
+ "lbm": "Lodhi",
+ "lbn": "Rmeet",
+ "lbo": "Laven",
+ "lbq": "Wampar",
+ "lbr": "Lohorung",
+ "lbs": "Libyan Sign Language",
+ "lbt": "Lachi",
+ "lbu": "Labu",
+ "lbv": "Lavatbura-Lamusong",
+ "lbw": "Tolaki",
+ "lbx": "Lawangan",
+ "lby": "Lamalama; Lamu-Lamu",
+ "lbz": "Lardil",
+ "lcc": "Legenyem",
+ "lcd": "Lola",
+ "lce": "Loncong; Sekak",
+ "lcf": "Lubu",
+ "lch": "Luchazi",
+ "lcl": "Lisela",
+ "lcm": "Tungag",
+ "lcp": "Western Lawa",
+ "lcq": "Luhu",
+ "lcs": "Lisabata-Nuniali",
+ "lda": "Kla-Dan",
+ "ldb": "Dũya",
+ "ldd": "Luri",
+ "ldg": "Lenyima",
+ "ldh": "Lamja-Dengsa-Tola",
+ "ldi": "Laari",
+ "ldj": "Lemoro",
+ "ldk": "Leelau",
+ "ldl": "Kaan",
+ "ldm": "Landoma",
+ "ldn": "Láadan",
+ "ldo": "Loo",
+ "ldp": "Tso",
+ "ldq": "Lufu",
+ "lea": "Lega-Shabunda",
+ "leb": "Lala-Bisa",
+ "lec": "Leco",
+ "led": "Lendu",
+ "lee": "Lyélé",
+ "lef": "Lelemi",
+ "leh": "Lenje",
+ "lei": "Lemio",
+ "lej": "Lengola",
+ "lek": "Leipon",
+ "lel": "Lele (Democratic Republic of Congo)",
+ "lem": "Nomaande",
+ "len": "Lenca",
+ "leo": "Leti (Cameroon)",
+ "lep": "Lepcha",
+ "leq": "Lembena",
+ "ler": "Lenkau",
+ "les": "Lese",
+ "let": "Lesing-Gelimi; Amio-Gelimi",
+ "leu": "Kara (Papua New Guinea)",
+ "lev": "Lamma",
+ "lew": "Ledo Kaili",
+ "lex": "Luang",
+ "ley": "Lemolang",
+ "lez": "Lezghian",
+ "lfa": "Lefa",
+ "lfn": "Lingua Franca Nova",
+ "lg": "Ganda; Luganda",
+ "lga": "Lungga",
+ "lgb": "Laghu",
+ "lgg": "Lugbara",
+ "lgh": "Laghuu",
+ "lgi": "Lengilu",
+ "lgk": "Lingarak; Neverver",
+ "lgl": "Wala",
+ "lgm": "Lega-Mwenga",
+ "lgn": "T'apo; Opuuo",
+ "lgo": "Lango (South Sudan)",
+ "lgq": "Logba",
+ "lgr": "Lengo",
+ "lgt": "Pahi",
+ "lgu": "Longgu",
+ "lgz": "Ligenza",
+ "lha": "Laha (Viet Nam)",
+ "lhh": "Laha (Indonesia)",
+ "lhi": "Lahu Shi",
+ "lhl": "Lahul Lohar",
+ "lhm": "Lhomi",
+ "lhn": "Lahanan",
+ "lhp": "Lhokpu",
+ "lhs": "Mlahsö",
+ "lht": "Lo-Toga",
+ "lhu": "Lahu",
+ "li": "Limburgan; Limburger; Limburgish",
+ "lia": "West-Central Limba",
+ "lib": "Likum",
+ "lic": "Hlai",
+ "lid": "Nyindrou",
+ "lie": "Likila",
+ "lif": "Limbu",
+ "lig": "Ligbi",
+ "lih": "Lihir",
+ "lij": "Ligurian",
+ "lik": "Lika",
+ "lil": "Lillooet",
+ "lio": "Liki",
+ "lip": "Sekpele",
+ "liq": "Libido",
+ "lir": "Liberian English",
+ "lis": "Lisu",
+ "liu": "Logorik",
+ "liv": "Liv",
+ "liw": "Col",
+ "lix": "Liabuku",
+ "liy": "Banda-Bambari",
+ "liz": "Libinza",
+ "lja": "Golpa",
+ "lje": "Rampi",
+ "lji": "Laiyolo",
+ "ljl": "Li'o",
+ "ljp": "Lampung Api",
+ "ljw": "Yirandali",
+ "ljx": "Yuru",
+ "lka": "Lakalei",
+ "lkb": "Kabras; Lukabaras",
+ "lkc": "Kucong",
+ "lkd": "Lakondê",
+ "lke": "Kenyi",
+ "lkh": "Lakha",
+ "lki": "Laki",
+ "lkj": "Remun",
+ "lkl": "Laeko-Libuat",
+ "lkm": "Kalaamaya",
+ "lkn": "Lakon; Vure",
+ "lko": "Khayo; Olukhayo",
+ "lkr": "Päri",
+ "lks": "Kisa; Olushisa",
+ "lkt": "Lakota",
+ "lku": "Kungkari",
+ "lky": "Lokoya",
+ "lla": "Lala-Roba",
+ "llb": "Lolo",
+ "llc": "Lele (Guinea)",
+ "lld": "Ladin",
+ "lle": "Lele (Papua New Guinea)",
+ "llf": "Hermit",
+ "llg": "Lole",
+ "llh": "Lamu",
+ "lli": "Teke-Laali",
+ "llj": "Ladji Ladji",
+ "llk": "Lelak",
+ "lll": "Lilau",
+ "llm": "Lasalimu",
+ "lln": "Lele (Chad)",
+ "llp": "North Efate",
+ "llq": "Lolak",
+ "lls": "Lithuanian Sign Language",
+ "llu": "Lau",
+ "llx": "Lauan",
+ "lma": "East Limba",
+ "lmb": "Merei",
+ "lmc": "Limilngan",
+ "lmd": "Lumun",
+ "lme": "Pévé",
+ "lmf": "South Lembata",
+ "lmg": "Lamogai",
+ "lmh": "Lambichhong",
+ "lmi": "Lombi",
+ "lmj": "West Lembata",
+ "lmk": "Lamkang",
+ "lml": "Hano",
+ "lmn": "Lambadi",
+ "lmo": "Lombard",
+ "lmp": "Limbum",
+ "lmq": "Lamatuka",
+ "lmr": "Lamalera",
+ "lmu": "Lamenu",
+ "lmv": "Lomaiviti",
+ "lmw": "Lake Miwok",
+ "lmx": "Laimbue",
+ "lmy": "Lamboya",
+ "ln": "Lingala",
+ "lna": "Langbashe",
+ "lnb": "Mbalanhu",
+ "lnd": "Lundayeh; Lun Bawang",
+ "lng": "Langobardic",
+ "lnh": "Lanoh",
+ "lni": "Daantanai'",
+ "lnj": "Leningitij",
+ "lnl": "South Central Banda",
+ "lnm": "Langam",
+ "lnn": "Lorediakarkar",
+ "lns": "Lamnso'",
+ "lnu": "Longuda",
+ "lnw": "Lanima",
+ "lnz": "Lonzo",
+ "lo": "Lao",
+ "loa": "Loloda",
+ "lob": "Lobi",
+ "loc": "Inonhan",
+ "loe": "Saluan",
+ "lof": "Logol",
+ "log": "Logo",
+ "loh": "Narim",
+ "loi": "Loma (Côte d'Ivoire)",
+ "loj": "Lou",
+ "lok": "Loko",
+ "lol": "Mongo",
+ "lom": "Loma (Liberia)",
+ "lon": "Malawi Lomwe",
+ "loo": "Lombo",
+ "lop": "Lopa",
+ "loq": "Lobala",
+ "lor": "Téén",
+ "los": "Loniu",
+ "lot": "Otuho",
+ "lou": "Louisiana Creole",
+ "lov": "Lopi",
+ "low": "Tampias Lobu",
+ "lox": "Loun",
+ "loy": "Loke",
+ "loz": "Lozi",
+ "lpa": "Lelepa",
+ "lpe": "Lepki",
+ "lpn": "Long Phuri Naga",
+ "lpo": "Lipo",
+ "lpx": "Lopit",
+ "lqr": "Logir",
+ "lra": "Rara Bakati'",
+ "lrc": "Northern Luri",
+ "lre": "Laurentian",
+ "lrg": "Laragia",
+ "lri": "Marachi; Olumarachi",
+ "lrk": "Loarki",
+ "lrl": "Lari",
+ "lrm": "Marama; Olumarama",
+ "lrn": "Lorang",
+ "lro": "Laro",
+ "lrr": "Southern Yamphu",
+ "lrt": "Larantuka Malay",
+ "lrv": "Larevat",
+ "lrz": "Lemerig",
+ "lsa": "Lasgerdi",
+ "lsb": "Burundian Sign Language; Langue des Signes Burundaise",
+ "lsc": "Albarradas Sign Language; Lengua de señas Albarradas",
+ "lsd": "Lishana Deni",
+ "lse": "Lusengo",
+ "lsh": "Lish",
+ "lsi": "Lashi",
+ "lsl": "Latvian Sign Language",
+ "lsm": "Saamia; Olusamia",
+ "lsn": "Tibetan Sign Language",
+ "lso": "Laos Sign Language",
+ "lsp": "Panamanian Sign Language; Lengua de Señas Panameñas",
+ "lsr": "Aruop",
+ "lss": "Lasi",
+ "lst": "Trinidad and Tobago Sign Language",
+ "lsv": "Sivia Sign Language",
+ "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise",
+ "lsy": "Mauritian Sign Language",
+ "lt": "Lithuanian",
+ "ltc": "Late Middle Chinese",
+ "ltg": "Latgalian",
+ "lth": "Thur",
+ "lti": "Leti (Indonesia)",
+ "ltn": "Latundê",
+ "lto": "Tsotso; Olutsotso",
+ "lts": "Tachoni; Lutachoni",
+ "ltu": "Latu",
+ "lu": "Luba-Katanga",
+ "lua": "Luba-Lulua",
+ "luc": "Aringa",
+ "lud": "Ludian",
+ "lue": "Luvale",
+ "luf": "Laua",
+ "lui": "Luiseno",
+ "luj": "Luna",
+ "luk": "Lunanakha",
+ "lul": "Olu'bo",
+ "lum": "Luimbi",
+ "lun": "Lunda",
+ "luo": "Luo (Kenya and Tanzania); Dholuo",
+ "lup": "Lumbu",
+ "luq": "Lucumi",
+ "lur": "Laura",
+ "lus": "Lushai",
+ "lut": "Lushootseed",
+ "luu": "Lumba-Yakkha",
+ "luv": "Luwati",
+ "luw": "Luo (Cameroon)",
+ "luy": "Luyia; Oluluyia",
+ "luz": "Southern Luri",
+ "lv": "Latvian",
+ "lva": "Maku'a",
+ "lvi": "Lavi",
+ "lvk": "Lavukaleve",
+ "lvs": "Standard Latvian",
+ "lvu": "Levuka",
+ "lwa": "Lwalu",
+ "lwe": "Lewo Eleng",
+ "lwg": "Wanga; Oluwanga",
+ "lwh": "White Lachi",
+ "lwl": "Eastern Lawa",
+ "lwm": "Laomian",
+ "lwo": "Luwo",
+ "lws": "Malawian Sign Language",
+ "lwt": "Lewotobi",
+ "lwu": "Lawu",
+ "lww": "Lewo",
+ "lxm": "Lakurumau",
+ "lya": "Layakha",
+ "lyg": "Lyngngam",
+ "lyn": "Luyana",
+ "lzh": "Literary Chinese",
+ "lzl": "Litzlitz",
+ "lzn": "Leinong Naga",
+ "lzz": "Laz",
+ "maa": "San Jerónimo Tecóatl Mazatec",
+ "mab": "Yutanduchi Mixtec",
+ "mad": "Madurese",
+ "mae": "Bo-Rukul",
+ "maf": "Mafa",
+ "mag": "Magahi",
+ "mai": "Maithili",
+ "maj": "Jalapa De Díaz Mazatec",
+ "mak": "Makasar",
+ "mam": "Mam",
+ "man": "Mandingo; Manding",
+ "map": "Austronesian languages",
+ "maq": "Chiquihuitlán Mazatec",
+ "mas": "Masai",
+ "mat": "San Francisco Matlatzinca",
+ "mau": "Huautla Mazatec",
+ "mav": "Sateré-Mawé",
+ "maw": "Mampruli",
+ "max": "North Moluccan Malay",
+ "maz": "Central Mazahua",
+ "mba": "Higaonon",
+ "mbb": "Western Bukidnon Manobo",
+ "mbc": "Macushi",
+ "mbd": "Dibabawon Manobo",
+ "mbe": "Molale",
+ "mbf": "Baba Malay",
+ "mbh": "Mangseng",
+ "mbi": "Ilianen Manobo",
+ "mbj": "Nadëb",
+ "mbk": "Malol",
+ "mbl": "Maxakalí",
+ "mbm": "Ombamba",
+ "mbn": "Macaguán",
+ "mbo": "Mbo (Cameroon)",
+ "mbp": "Malayo",
+ "mbq": "Maisin",
+ "mbr": "Nukak Makú",
+ "mbs": "Sarangani Manobo",
+ "mbt": "Matigsalug Manobo",
+ "mbu": "Mbula-Bwazza",
+ "mbv": "Mbulungish",
+ "mbw": "Maring",
+ "mbx": "Mari (East Sepik Province)",
+ "mby": "Memoni",
+ "mbz": "Amoltepec Mixtec",
+ "mca": "Maca",
+ "mcb": "Machiguenga",
+ "mcc": "Bitur",
+ "mcd": "Sharanahua",
+ "mce": "Itundujia Mixtec",
+ "mcf": "Matsés",
+ "mcg": "Mapoyo",
+ "mch": "Maquiritari",
+ "mci": "Mese",
+ "mcj": "Mvanip",
+ "mck": "Mbunda",
+ "mcl": "Macaguaje",
+ "mcm": "Malaccan Creole Portuguese",
+ "mcn": "Masana",
+ "mco": "Coatlán Mixe",
+ "mcp": "Makaa",
+ "mcq": "Ese",
+ "mcr": "Menya",
+ "mcs": "Mambai",
+ "mct": "Mengisa",
+ "mcu": "Cameroon Mambila",
+ "mcv": "Minanibai",
+ "mcw": "Mawa (Chad)",
+ "mcx": "Mpiemo",
+ "mcy": "South Watut",
+ "mcz": "Mawan",
+ "mda": "Mada (Nigeria)",
+ "mdb": "Morigi",
+ "mdc": "Male (Papua New Guinea)",
+ "mdd": "Mbum",
+ "mde": "Maba (Chad)",
+ "mdf": "Moksha",
+ "mdg": "Massalat",
+ "mdh": "Maguindanaon",
+ "mdi": "Mamvu",
+ "mdj": "Mangbetu",
+ "mdk": "Mangbutu",
+ "mdl": "Maltese Sign Language",
+ "mdm": "Mayogo",
+ "mdn": "Mbati",
+ "mdp": "Mbala",
+ "mdq": "Mbole",
+ "mdr": "Mandar",
+ "mds": "Maria (Papua New Guinea)",
+ "mdt": "Mbere",
+ "mdu": "Mboko",
+ "mdv": "Santa Lucía Monteverde Mixtec",
+ "mdw": "Mbosi",
+ "mdx": "Dizin",
+ "mdy": "Male (Ethiopia)",
+ "mdz": "Suruí Do Pará",
+ "mea": "Menka",
+ "meb": "Ikobi",
+ "mec": "Marra",
+ "med": "Melpa",
+ "mee": "Mengen",
+ "mef": "Megam",
+ "meh": "Southwestern Tlaxiaco Mixtec",
+ "mei": "Midob",
+ "mej": "Meyah",
+ "mek": "Mekeo",
+ "mel": "Central Melanau",
+ "mem": "Mangala",
+ "men": "Mende (Sierra Leone)",
+ "meo": "Kedah Malay",
+ "mep": "Miriwoong",
+ "meq": "Merey",
+ "mer": "Meru",
+ "mes": "Masmaje",
+ "met": "Mato",
+ "meu": "Motu",
+ "mev": "Mano",
+ "mew": "Maaka",
+ "mey": "Hassaniyya",
+ "mez": "Menominee",
+ "mfa": "Pattani Malay",
+ "mfb": "Bangka",
+ "mfc": "Mba",
+ "mfd": "Mendankwe-Nkwen",
+ "mfe": "Morisyen",
+ "mff": "Naki",
+ "mfg": "Mogofin",
+ "mfh": "Matal",
+ "mfi": "Wandala",
+ "mfj": "Mefele",
+ "mfk": "North Mofu",
+ "mfl": "Putai",
+ "mfm": "Marghi South",
+ "mfn": "Cross River Mbembe",
+ "mfo": "Mbe",
+ "mfp": "Makassar Malay",
+ "mfq": "Moba",
+ "mfr": "Marrithiyel",
+ "mfs": "Mexican Sign Language",
+ "mft": "Mokerang",
+ "mfu": "Mbwela",
+ "mfv": "Mandjak",
+ "mfw": "Mulaha",
+ "mfx": "Melo",
+ "mfy": "Mayo",
+ "mfz": "Mabaan",
+ "mg": "Malagasy",
+ "mga": "Middle Irish (900-1200)",
+ "mgb": "Mararit",
+ "mgc": "Morokodo",
+ "mgd": "Moru",
+ "mge": "Mango",
+ "mgf": "Maklew",
+ "mgg": "Mpumpong",
+ "mgh": "Makhuwa-Meetto",
+ "mgi": "Lijili",
+ "mgj": "Abureni",
+ "mgk": "Mawes",
+ "mgl": "Maleu-Kilenge",
+ "mgm": "Mambae",
+ "mgn": "Mbangi",
+ "mgo": "Meta'",
+ "mgp": "Eastern Magar",
+ "mgq": "Malila",
+ "mgr": "Mambwe-Lungu",
+ "mgs": "Manda (Tanzania)",
+ "mgt": "Mongol",
+ "mgu": "Mailu",
+ "mgv": "Matengo",
+ "mgw": "Matumbi",
+ "mgy": "Mbunga",
+ "mgz": "Mbugwe",
+ "mh": "Marshallese",
+ "mha": "Manda (India)",
+ "mhb": "Mahongwe",
+ "mhc": "Mocho",
+ "mhd": "Mbugu",
+ "mhe": "Besisi; Mah Meri",
+ "mhf": "Mamaa",
+ "mhg": "Margu",
+ "mhi": "Ma'di",
+ "mhj": "Mogholi",
+ "mhk": "Mungaka",
+ "mhl": "Mauwake",
+ "mhm": "Makhuwa-Moniga",
+ "mhn": "Mócheno",
+ "mho": "Mashi (Zambia)",
+ "mhp": "Balinese Malay",
+ "mhq": "Mandan",
+ "mhr": "Eastern Mari",
+ "mhs": "Buru (Indonesia)",
+ "mht": "Mandahuaca",
+ "mhu": "Digaro-Mishmi; Darang Deng",
+ "mhw": "Mbukushu",
+ "mhx": "Maru; Lhaovo",
+ "mhy": "Ma'anyan",
+ "mhz": "Mor (Mor Islands)",
+ "mi": "Maori",
+ "mia": "Miami",
+ "mib": "Atatláhuca Mixtec",
+ "mic": "Mi'kmaq; Micmac",
+ "mid": "Mandaic",
+ "mie": "Ocotepec Mixtec",
+ "mif": "Mofu-Gudur",
+ "mig": "San Miguel El Grande Mixtec",
+ "mih": "Chayuco Mixtec",
+ "mii": "Chigmecatitlán Mixtec",
+ "mij": "Abar; Mungbam",
+ "mik": "Mikasuki",
+ "mil": "Peñoles Mixtec",
+ "mim": "Alacatlatzala Mixtec",
+ "min": "Minangkabau",
+ "mio": "Pinotepa Nacional Mixtec",
+ "mip": "Apasco-Apoala Mixtec",
+ "miq": "Mískito",
+ "mir": "Isthmus Mixe",
+ "mit": "Southern Puebla Mixtec",
+ "miu": "Cacaloxtepec Mixtec",
+ "miw": "Akoye",
+ "mix": "Mixtepec Mixtec",
+ "miy": "Ayutla Mixtec",
+ "miz": "Coatzospan Mixtec",
+ "mjb": "Makalero",
+ "mjc": "San Juan Colorado Mixtec",
+ "mjd": "Northwest Maidu",
+ "mje": "Muskum",
+ "mjg": "Tu",
+ "mjh": "Mwera (Nyasa)",
+ "mji": "Kim Mun",
+ "mjj": "Mawak",
+ "mjk": "Matukar",
+ "mjl": "Mandeali",
+ "mjm": "Medebur",
+ "mjn": "Ma (Papua New Guinea)",
+ "mjo": "Malankuravan",
+ "mjp": "Malapandaram",
+ "mjq": "Malaryan",
+ "mjr": "Malavedan",
+ "mjs": "Miship",
+ "mjt": "Sauria Paharia",
+ "mju": "Manna-Dora",
+ "mjv": "Mannan",
+ "mjw": "Karbi",
+ "mjx": "Mahali",
+ "mjy": "Mahican",
+ "mjz": "Majhi",
+ "mk": "Macedonian",
+ "mka": "Mbre",
+ "mkb": "Mal Paharia",
+ "mkc": "Siliput",
+ "mke": "Mawchi",
+ "mkf": "Miya",
+ "mkg": "Mak (China)",
+ "mkh": "Mon-Khmer languages",
+ "mki": "Dhatki",
+ "mkj": "Mokilese",
+ "mkk": "Byep",
+ "mkl": "Mokole",
+ "mkm": "Moklen",
+ "mkn": "Kupang Malay",
+ "mko": "Mingang Doso",
+ "mkp": "Moikodi",
+ "mkq": "Bay Miwok",
+ "mkr": "Malas",
+ "mks": "Silacayoapan Mixtec",
+ "mkt": "Vamale",
+ "mku": "Konyanka Maninka",
+ "mkv": "Mafea",
+ "mkw": "Kituba (Congo)",
+ "mkx": "Kinamiging Manobo",
+ "mky": "East Makian",
+ "mkz": "Makasae",
+ "ml": "Malayalam",
+ "mla": "Malo",
+ "mlb": "Mbule",
+ "mlc": "Cao Lan",
+ "mle": "Manambu",
+ "mlf": "Mal",
+ "mlh": "Mape",
+ "mli": "Malimpung",
+ "mlj": "Miltu",
+ "mlk": "Ilwana; Kiwilwana",
+ "mll": "Malua Bay",
+ "mlm": "Mulam",
+ "mln": "Malango",
+ "mlo": "Mlomp",
+ "mlp": "Bargam",
+ "mlq": "Western Maninkakan",
+ "mlr": "Vame",
+ "mls": "Masalit",
+ "mlu": "To'abaita",
+ "mlv": "Motlav; Mwotlap",
+ "mlw": "Moloko",
+ "mlx": "Malfaxal; Naha'ai",
+ "mlz": "Malaynon",
+ "mma": "Mama",
+ "mmb": "Momina",
+ "mmc": "Michoacán Mazahua",
+ "mmd": "Maonan",
+ "mme": "Mae",
+ "mmf": "Mundat",
+ "mmg": "North Ambrym",
+ "mmh": "Mehináku",
+ "mmi": "Musar",
+ "mmj": "Majhwar",
+ "mmk": "Mukha-Dora",
+ "mml": "Man Met",
+ "mmm": "Maii",
+ "mmn": "Mamanwa",
+ "mmo": "Mangga Buang",
+ "mmp": "Siawi",
+ "mmq": "Musak",
+ "mmr": "Western Xiangxi Miao",
+ "mmt": "Malalamai",
+ "mmu": "Mmaala",
+ "mmv": "Miriti",
+ "mmw": "Emae",
+ "mmx": "Madak",
+ "mmy": "Migaama",
+ "mmz": "Mabaale",
+ "mn": "Mongolian",
+ "mna": "Mbula",
+ "mnb": "Muna",
+ "mnc": "Manchu",
+ "mnd": "Mondé",
+ "mne": "Naba",
+ "mnf": "Mundani",
+ "mng": "Eastern Mnong",
+ "mnh": "Mono (Democratic Republic of Congo)",
+ "mni": "Manipuri",
+ "mnj": "Munji",
+ "mnk": "Mandinka",
+ "mnl": "Tiale",
+ "mnm": "Mapena",
+ "mnn": "Southern Mnong",
+ "mno": "Manobo languages",
+ "mnp": "Min Bei Chinese",
+ "mnq": "Minriq",
+ "mnr": "Mono (USA)",
+ "mns": "Mansi",
+ "mnu": "Mer",
+ "mnv": "Rennell-Bellona",
+ "mnw": "Mon",
+ "mnx": "Manikion",
+ "mny": "Manyawa",
+ "mnz": "Moni",
+ "moa": "Mwan",
+ "moc": "Mocoví",
+ "mod": "Mobilian",
+ "moe": "Innu; Montagnais",
+ "mog": "Mongondow",
+ "moh": "Mohawk",
+ "moi": "Mboi",
+ "moj": "Monzombo",
+ "mok": "Morori",
+ "mom": "Mangue",
+ "moo": "Monom",
+ "mop": "Mopán Maya",
+ "moq": "Mor (Bomberai Peninsula)",
+ "mor": "Moro",
+ "mos": "Mossi",
+ "mot": "Barí",
+ "mou": "Mogum",
+ "mov": "Mohave",
+ "mow": "Moi (Congo)",
+ "mox": "Molima",
+ "moy": "Shekkacho",
+ "moz": "Mukulu; Gergiko",
+ "mpa": "Mpoto",
+ "mpb": "Malak Malak; Mullukmulluk",
+ "mpc": "Mangarrayi",
+ "mpd": "Machinere",
+ "mpe": "Majang",
+ "mpg": "Marba",
+ "mph": "Maung",
+ "mpi": "Mpade",
+ "mpj": "Martu Wangka; Wangkajunga",
+ "mpk": "Mbara (Chad)",
+ "mpl": "Middle Watut",
+ "mpm": "Yosondúa Mixtec",
+ "mpn": "Mindiri",
+ "mpo": "Miu",
+ "mpp": "Migabac",
+ "mpq": "Matís",
+ "mpr": "Vangunu",
+ "mps": "Dadibi",
+ "mpt": "Mian",
+ "mpu": "Makuráp",
+ "mpv": "Mungkip",
+ "mpw": "Mapidian",
+ "mpx": "Misima-Panaeati",
+ "mpy": "Mapia",
+ "mpz": "Mpi",
+ "mqa": "Maba (Indonesia)",
+ "mqb": "Mbuko",
+ "mqc": "Mangole",
+ "mqe": "Matepi",
+ "mqf": "Momuna",
+ "mqg": "Kota Bangun Kutai Malay",
+ "mqh": "Tlazoyaltepec Mixtec",
+ "mqi": "Mariri",
+ "mqj": "Mamasa",
+ "mqk": "Rajah Kabunsuwan Manobo",
+ "mql": "Mbelime",
+ "mqm": "South Marquesan",
+ "mqn": "Moronene",
+ "mqo": "Modole",
+ "mqp": "Manipa",
+ "mqq": "Minokok",
+ "mqr": "Mander",
+ "mqs": "West Makian",
+ "mqt": "Mok",
+ "mqu": "Mandari",
+ "mqv": "Mosimo",
+ "mqw": "Murupi",
+ "mqx": "Mamuju",
+ "mqy": "Manggarai",
+ "mqz": "Pano",
+ "mr": "Marathi",
+ "mra": "Mlabri",
+ "mrb": "Marino",
+ "mrc": "Maricopa",
+ "mrd": "Western Magar",
+ "mre": "Martha's Vineyard Sign Language",
+ "mrf": "Elseng",
+ "mrg": "Mising",
+ "mrh": "Mara Chin",
+ "mrj": "Western Mari",
+ "mrk": "Hmwaveke",
+ "mrl": "Mortlockese",
+ "mrm": "Merlav; Mwerlap",
+ "mrn": "Cheke Holo",
+ "mro": "Mru",
+ "mrp": "Morouas",
+ "mrq": "North Marquesan",
+ "mrr": "Maria (India)",
+ "mrs": "Maragus",
+ "mrt": "Marghi Central",
+ "mru": "Mono (Cameroon)",
+ "mrv": "Mangareva",
+ "mrw": "Maranao",
+ "mrx": "Maremgi; Dineor",
+ "mry": "Mandaya",
+ "mrz": "Marind",
+ "ms": "Malay (macrolanguage)",
+ "msb": "Masbatenyo",
+ "msc": "Sankaran Maninka",
+ "msd": "Yucatec Maya Sign Language",
+ "mse": "Musey",
+ "msf": "Mekwei",
+ "msg": "Moraid",
+ "msh": "Masikoro Malagasy",
+ "msi": "Sabah Malay",
+ "msj": "Ma (Democratic Republic of Congo)",
+ "msk": "Mansaka",
+ "msl": "Molof; Poule",
+ "msm": "Agusan Manobo",
+ "msn": "Vurës",
+ "mso": "Mombum",
+ "msp": "Maritsauá",
+ "msq": "Caac",
+ "msr": "Mongolian Sign Language",
+ "mss": "West Masela",
+ "msu": "Musom",
+ "msv": "Maslam",
+ "msw": "Mansoanka",
+ "msx": "Moresada",
+ "msy": "Aruamu",
+ "msz": "Momare",
+ "mt": "Maltese",
+ "mta": "Cotabato Manobo",
+ "mtb": "Anyin Morofo",
+ "mtc": "Munit",
+ "mtd": "Mualang",
+ "mte": "Mono (Solomon Islands)",
+ "mtf": "Murik (Papua New Guinea)",
+ "mtg": "Una",
+ "mth": "Munggui",
+ "mti": "Maiwa (Papua New Guinea)",
+ "mtj": "Moskona",
+ "mtk": "Mbe'",
+ "mtl": "Montol",
+ "mtm": "Mator",
+ "mtn": "Matagalpa",
+ "mto": "Totontepec Mixe",
+ "mtp": "Wichí Lhamtés Nocten",
+ "mtq": "Muong",
+ "mtr": "Mewari",
+ "mts": "Yora",
+ "mtt": "Mota",
+ "mtu": "Tututepec Mixtec",
+ "mtv": "Asaro'o",
+ "mtw": "Southern Binukidnon",
+ "mtx": "Tidaá Mixtec",
+ "mty": "Nabi",
+ "mua": "Mundang",
+ "mub": "Mubi",
+ "muc": "Ajumbu",
+ "mud": "Mednyj Aleut",
+ "mue": "Media Lengua",
+ "mug": "Musgu",
+ "muh": "Mündü",
+ "mui": "Musi",
+ "muj": "Mabire",
+ "muk": "Mugom",
+ "mum": "Maiwala",
+ "mun": "Munda languages",
+ "muo": "Nyong",
+ "mup": "Malvi",
+ "muq": "Eastern Xiangxi Miao",
+ "mur": "Murle",
+ "mus": "Creek",
+ "mut": "Western Muria",
+ "muu": "Yaaku",
+ "muv": "Muthuvan",
+ "mux": "Bo-Ung",
+ "muy": "Muyang",
+ "muz": "Mursi",
+ "mva": "Manam",
+ "mvb": "Mattole",
+ "mvd": "Mamboru",
+ "mve": "Marwari (Pakistan)",
+ "mvf": "Peripheral Mongolian",
+ "mvg": "Yucuañe Mixtec",
+ "mvh": "Mulgi",
+ "mvi": "Miyako",
+ "mvk": "Mekmek",
+ "mvl": "Mbara (Australia)",
+ "mvn": "Minaveha",
+ "mvo": "Marovo",
+ "mvp": "Duri",
+ "mvq": "Moere",
+ "mvr": "Marau",
+ "mvs": "Massep",
+ "mvt": "Mpotovoro",
+ "mvu": "Marfa",
+ "mvv": "Tagal Murut",
+ "mvw": "Machinga",
+ "mvx": "Meoswar",
+ "mvy": "Indus Kohistani",
+ "mvz": "Mesqan",
+ "mwa": "Mwatebu",
+ "mwb": "Juwal",
+ "mwc": "Are",
+ "mwe": "Mwera (Chimwera)",
+ "mwf": "Murrinh-Patha",
+ "mwg": "Aiklep",
+ "mwh": "Mouk-Aria",
+ "mwi": "Labo; Ninde",
+ "mwk": "Kita Maninkakan",
+ "mwl": "Mirandese",
+ "mwm": "Sar",
+ "mwn": "Nyamwanga",
+ "mwo": "Central Maewo",
+ "mwp": "Kala Lagaw Ya",
+ "mwq": "Mün Chin",
+ "mwr": "Marwari",
+ "mws": "Mwimbi-Muthambi",
+ "mwt": "Moken",
+ "mwu": "Mittu",
+ "mwv": "Mentawai",
+ "mww": "Hmong Daw",
+ "mwz": "Moingi",
+ "mxa": "Northwest Oaxaca Mixtec",
+ "mxb": "Tezoatlán Mixtec",
+ "mxc": "Manyika",
+ "mxd": "Modang",
+ "mxe": "Mele-Fila",
+ "mxf": "Malgbe",
+ "mxg": "Mbangala",
+ "mxh": "Mvuba",
+ "mxi": "Mozarabic",
+ "mxj": "Miju-Mishmi; Geman Deng",
+ "mxk": "Monumbo",
+ "mxl": "Maxi Gbe",
+ "mxm": "Meramera",
+ "mxn": "Moi (Indonesia)",
+ "mxo": "Mbowe",
+ "mxp": "Tlahuitoltepec Mixe",
+ "mxq": "Juquila Mixe",
+ "mxr": "Murik (Malaysia)",
+ "mxs": "Huitepec Mixtec",
+ "mxt": "Jamiltepec Mixtec",
+ "mxu": "Mada (Cameroon)",
+ "mxv": "Metlatónoc Mixtec",
+ "mxw": "Namo",
+ "mxx": "Mahou; Mawukakan",
+ "mxy": "Southeastern Nochixtlán Mixtec",
+ "mxz": "Central Masela",
+ "my": "Burmese",
+ "myb": "Mbay",
+ "myc": "Mayeka",
+ "mye": "Myene",
+ "myf": "Bambassi",
+ "myg": "Manta",
+ "myh": "Makah",
+ "myj": "Mangayat",
+ "myk": "Mamara Senoufo",
+ "myl": "Moma",
+ "mym": "Me'en",
+ "myn": "Mayan languages",
+ "myo": "Anfillo",
+ "myp": "Pirahã",
+ "myr": "Muniche",
+ "mys": "Mesmes",
+ "myu": "Mundurukú",
+ "myv": "Erzya",
+ "myw": "Muyuw",
+ "myx": "Masaaba",
+ "myy": "Macuna",
+ "myz": "Classical Mandaic",
+ "mza": "Santa María Zacatepec Mixtec",
+ "mzb": "Tumzabt",
+ "mzc": "Madagascar Sign Language",
+ "mzd": "Malimba",
+ "mze": "Morawa",
+ "mzg": "Monastic Sign Language",
+ "mzh": "Wichí Lhamtés Güisnay",
+ "mzi": "Ixcatlán Mazatec",
+ "mzj": "Manya",
+ "mzk": "Nigeria Mambila",
+ "mzl": "Mazatlán Mixe",
+ "mzm": "Mumuye",
+ "mzn": "Mazanderani",
+ "mzo": "Matipuhy",
+ "mzp": "Movima",
+ "mzq": "Mori Atas",
+ "mzr": "Marúbo",
+ "mzs": "Macanese",
+ "mzt": "Mintil",
+ "mzu": "Inapang",
+ "mzv": "Manza",
+ "mzw": "Deg",
+ "mzx": "Mawayana",
+ "mzy": "Mozambican Sign Language",
+ "mzz": "Maiadomu",
+ "na": "Nauru",
+ "naa": "Namla",
+ "nab": "Southern Nambikuára",
+ "nac": "Narak",
+ "nae": "Naka'ela",
+ "naf": "Nabak",
+ "nag": "Naga Pidgin",
+ "nah": "Nahuatl languages",
+ "nai": "North American Indian languages",
+ "naj": "Nalu",
+ "nak": "Nakanai",
+ "nal": "Nalik",
+ "nam": "Ngan'gityemerri",
+ "nan": "Min Nan Chinese",
+ "nao": "Naaba",
+ "nap": "Neapolitan",
+ "naq": "Khoekhoe; Nama (Namibia)",
+ "nar": "Iguta",
+ "nas": "Naasioi",
+ "nat": "Ca̱hungwa̱rya̱; Hungworo",
+ "naw": "Nawuri",
+ "nax": "Nakwi",
+ "nay": "Ngarrindjeri",
+ "naz": "Coatepec Nahuatl",
+ "nb": "Norwegian Bokmål",
+ "nba": "Nyemba",
+ "nbb": "Ndoe",
+ "nbc": "Chang Naga",
+ "nbd": "Ngbinda",
+ "nbe": "Konyak Naga",
+ "nbg": "Nagarchal",
+ "nbh": "Ngamo",
+ "nbi": "Mao Naga",
+ "nbj": "Ngarinyman",
+ "nbk": "Nake",
+ "nbm": "Ngbaka Ma'bo",
+ "nbn": "Kuri",
+ "nbo": "Nkukoli",
+ "nbp": "Nnam",
+ "nbq": "Nggem",
+ "nbr": "Numana",
+ "nbs": "Namibian Sign Language",
+ "nbt": "Na",
+ "nbu": "Rongmei Naga",
+ "nbv": "Ngamambo",
+ "nbw": "Southern Ngbandi",
+ "nby": "Ningera",
+ "nca": "Iyo",
+ "ncb": "Central Nicobarese",
+ "ncc": "Ponam",
+ "ncd": "Nachering",
+ "nce": "Yale",
+ "ncf": "Notsi",
+ "ncg": "Nisga'a",
+ "nch": "Central Huasteca Nahuatl",
+ "nci": "Classical Nahuatl",
+ "ncj": "Northern Puebla Nahuatl",
+ "nck": "Na-kara",
+ "ncl": "Michoacán Nahuatl",
+ "ncm": "Nambo",
+ "ncn": "Nauna",
+ "nco": "Sibe",
+ "ncq": "Northern Katang",
+ "ncr": "Ncane",
+ "ncs": "Nicaraguan Sign Language",
+ "nct": "Chothe Naga",
+ "ncu": "Chumburung",
+ "ncx": "Central Puebla Nahuatl",
+ "ncz": "Natchez",
+ "nd": "North Ndebele",
+ "nda": "Ndasa",
+ "ndb": "Kenswei Nsei",
+ "ndc": "Ndau",
+ "ndd": "Nde-Nsele-Nta",
+ "ndf": "Nadruvian",
+ "ndg": "Ndengereko",
+ "ndh": "Ndali",
+ "ndi": "Samba Leko",
+ "ndj": "Ndamba",
+ "ndk": "Ndaka",
+ "ndl": "Ndolo",
+ "ndm": "Ndam",
+ "ndn": "Ngundi",
+ "ndp": "Ndo",
+ "ndq": "Ndombe",
+ "ndr": "Ndoola",
+ "nds": "Low German; Low Saxon",
+ "ndt": "Ndunga",
+ "ndu": "Dugun",
+ "ndv": "Ndut",
+ "ndw": "Ndobo",
+ "ndx": "Nduga",
+ "ndy": "Lutos",
+ "ndz": "Ndogo",
+ "ne": "Nepali (macrolanguage)",
+ "nea": "Eastern Ngad'a",
+ "neb": "Toura (Côte d'Ivoire)",
+ "nec": "Nedebang",
+ "ned": "Nde-Gbite",
+ "nee": "Nêlêmwa-Nixumwak",
+ "nef": "Nefamese",
+ "neg": "Negidal",
+ "neh": "Nyenkha",
+ "nei": "Neo-Hittite",
+ "nej": "Neko",
+ "nek": "Neku",
+ "nem": "Nemi",
+ "nen": "Nengone",
+ "neo": "Ná-Meo",
+ "neq": "North Central Mixe",
+ "ner": "Yahadian",
+ "nes": "Bhoti Kinnauri",
+ "net": "Nete",
+ "neu": "Neo",
+ "nev": "Nyaheun",
+ "new": "Newari; Nepal Bhasa",
+ "nex": "Neme",
+ "ney": "Neyo",
+ "nez": "Nez Perce",
+ "nfa": "Dhao",
+ "nfd": "Ahwai",
+ "nfl": "Ayiwo; Äiwoo",
+ "nfr": "Nafaanra",
+ "nfu": "Mfumte",
+ "ng": "Ndonga",
+ "nga": "Ngbaka",
+ "ngb": "Northern Ngbandi",
+ "ngc": "Ngombe (Democratic Republic of Congo)",
+ "ngd": "Ngando (Central African Republic)",
+ "nge": "Ngemba",
+ "ngf": "Trans-New Guinea languages",
+ "ngg": "Ngbaka Manza",
+ "ngh": "Nǁng",
+ "ngi": "Ngizim",
+ "ngj": "Ngie",
+ "ngk": "Dalabon",
+ "ngl": "Lomwe",
+ "ngm": "Ngatik Men's Creole",
+ "ngn": "Ngwo",
+ "ngp": "Ngulu",
+ "ngq": "Ngurimi; Ngoreme",
+ "ngr": "Engdewu",
+ "ngs": "Gvoko",
+ "ngt": "Kriang; Ngeq",
+ "ngu": "Guerrero Nahuatl",
+ "ngv": "Nagumi",
+ "ngw": "Ngwaba",
+ "ngx": "Nggwahyi",
+ "ngy": "Tibea",
+ "ngz": "Ngungwel",
+ "nha": "Nhanda",
+ "nhb": "Beng",
+ "nhc": "Tabasco Nahuatl",
+ "nhd": "Chiripá; Ava Guaraní",
+ "nhe": "Eastern Huasteca Nahuatl",
+ "nhf": "Nhuwala",
+ "nhg": "Tetelcingo Nahuatl",
+ "nhh": "Nahari",
+ "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl",
+ "nhk": "Isthmus-Cosoleacaque Nahuatl",
+ "nhm": "Morelos Nahuatl",
+ "nhn": "Central Nahuatl",
+ "nho": "Takuu",
+ "nhp": "Isthmus-Pajapan Nahuatl",
+ "nhq": "Huaxcaleca Nahuatl",
+ "nhr": "Naro",
+ "nht": "Ometepec Nahuatl",
+ "nhu": "Noone",
+ "nhv": "Temascaltepec Nahuatl",
+ "nhw": "Western Huasteca Nahuatl",
+ "nhx": "Isthmus-Mecayapan Nahuatl",
+ "nhy": "Northern Oaxaca Nahuatl",
+ "nhz": "Santa María La Alta Nahuatl",
+ "nia": "Nias",
+ "nib": "Nakame",
+ "nic": "Niger-Kordofanian languages",
+ "nid": "Ngandi",
+ "nie": "Niellim",
+ "nif": "Nek",
+ "nig": "Ngalakgan",
+ "nih": "Nyiha (Tanzania)",
+ "nii": "Nii",
+ "nij": "Ngaju",
+ "nik": "Southern Nicobarese",
+ "nil": "Nila",
+ "nim": "Nilamba",
+ "nin": "Ninzo",
+ "nio": "Nganasan",
+ "niq": "Nandi",
+ "nir": "Nimboran",
+ "nis": "Nimi",
+ "nit": "Southeastern Kolami",
+ "niu": "Niuean",
+ "niv": "Gilyak",
+ "niw": "Nimo",
+ "nix": "Hema",
+ "niy": "Ngiti",
+ "niz": "Ningil",
+ "nja": "Nzanyi",
+ "njb": "Nocte Naga",
+ "njd": "Ndonde Hamba",
+ "njh": "Lotha Naga",
+ "nji": "Gudanji",
+ "njj": "Njen",
+ "njl": "Njalgulgule",
+ "njm": "Angami Naga",
+ "njn": "Liangmai Naga",
+ "njo": "Ao Naga",
+ "njr": "Njerep",
+ "njs": "Nisa",
+ "njt": "Ndyuka-Trio Pidgin",
+ "nju": "Ngadjunmaya",
+ "njx": "Kunyi",
+ "njy": "Njyem",
+ "njz": "Nyishi",
+ "nka": "Nkoya",
+ "nkb": "Khoibu Naga",
+ "nkc": "Nkongho",
+ "nkd": "Koireng",
+ "nke": "Duke",
+ "nkf": "Inpui Naga",
+ "nkg": "Nekgini",
+ "nkh": "Khezha Naga",
+ "nki": "Thangal Naga",
+ "nkj": "Nakai",
+ "nkk": "Nokuku",
+ "nkm": "Namat",
+ "nkn": "Nkangala",
+ "nko": "Nkonya",
+ "nkp": "Niuatoputapu",
+ "nkq": "Nkami",
+ "nkr": "Nukuoro",
+ "nks": "North Asmat",
+ "nkt": "Nyika (Tanzania)",
+ "nku": "Bouna Kulango",
+ "nkv": "Nyika (Malawi and Zambia)",
+ "nkw": "Nkutu",
+ "nkx": "Nkoroo",
+ "nkz": "Nkari",
+ "nl": "Dutch; Flemish",
+ "nla": "Ngombale",
+ "nlc": "Nalca",
+ "nle": "East Nyala",
+ "nlg": "Gela",
+ "nli": "Grangali",
+ "nlj": "Nyali",
+ "nlk": "Ninia Yali",
+ "nll": "Nihali",
+ "nlm": "Mankiyali",
+ "nlo": "Ngul",
+ "nlq": "Lao Naga",
+ "nlu": "Nchumbulu",
+ "nlv": "Orizaba Nahuatl",
+ "nlw": "Walangama",
+ "nlx": "Nahali",
+ "nly": "Nyamal",
+ "nlz": "Nalögo",
+ "nma": "Maram Naga",
+ "nmb": "Big Nambas; V'ënen Taut",
+ "nmc": "Ngam",
+ "nmd": "Ndumu",
+ "nme": "Mzieme Naga",
+ "nmf": "Tangkhul Naga (India)",
+ "nmg": "Kwasio",
+ "nmh": "Monsang Naga",
+ "nmi": "Nyam",
+ "nmj": "Ngombe (Central African Republic)",
+ "nmk": "Namakura",
+ "nml": "Ndemli",
+ "nmm": "Manangba",
+ "nmn": "ǃXóõ",
+ "nmo": "Moyon Naga",
+ "nmp": "Nimanbur",
+ "nmq": "Nambya",
+ "nmr": "Nimbari",
+ "nms": "Letemboi",
+ "nmt": "Namonuito",
+ "nmu": "Northeast Maidu",
+ "nmv": "Ngamini",
+ "nmw": "Nimoa; Rifao",
+ "nmx": "Nama (Papua New Guinea)",
+ "nmy": "Namuyi",
+ "nmz": "Nawdm",
+ "nn": "Norwegian Nynorsk",
+ "nna": "Nyangumarta",
+ "nnb": "Nande",
+ "nnc": "Nancere",
+ "nnd": "West Ambae",
+ "nne": "Ngandyera",
+ "nnf": "Ngaing",
+ "nng": "Maring Naga",
+ "nnh": "Ngiemboon",
+ "nni": "North Nuaulu",
+ "nnj": "Nyangatom",
+ "nnk": "Nankina",
+ "nnl": "Northern Rengma Naga",
+ "nnm": "Namia",
+ "nnn": "Ngete",
+ "nnp": "Wancho Naga",
+ "nnq": "Ngindo",
+ "nnr": "Narungga",
+ "nnt": "Nanticoke",
+ "nnu": "Dwang",
+ "nnv": "Nugunu (Australia)",
+ "nnw": "Southern Nuni",
+ "nny": "Nyangga",
+ "nnz": "Nda'nda'",
+ "no": "Norwegian",
+ "noa": "Woun Meu",
+ "noc": "Nuk",
+ "nod": "Northern Thai",
+ "noe": "Nimadi",
+ "nof": "Nomane",
+ "nog": "Nogai",
+ "noh": "Nomu",
+ "noi": "Noiri",
+ "noj": "Nonuya",
+ "nok": "Nooksack",
+ "nol": "Nomlaki",
+ "nom": "Nocamán",
+ "non": "Old Norse",
+ "nop": "Numanggang",
+ "noq": "Ngongo",
+ "nos": "Eastern Nisu",
+ "not": "Nomatsiguenga",
+ "nou": "Ewage-Notu",
+ "nov": "Novial",
+ "now": "Nyambo",
+ "noy": "Noy",
+ "noz": "Nayi",
+ "npa": "Nar Phu",
+ "npb": "Nupbikha",
+ "npg": "Ponyo-Gongwang Naga",
+ "nph": "Phom Naga",
+ "npi": "Nepali (individual language)",
+ "npl": "Southeastern Puebla Nahuatl",
+ "npn": "Mondropolon",
+ "npo": "Pochuri Naga",
+ "nps": "Nipsan",
+ "npu": "Puimei Naga",
+ "npx": "Noipx",
+ "npy": "Napu",
+ "nqg": "Southern Nago",
+ "nqk": "Kura Ede Nago",
+ "nql": "Ngendelengo",
+ "nqm": "Ndom",
+ "nqn": "Nen",
+ "nqo": "N'Ko; N’Ko",
+ "nqq": "Kyan-Karyaw Naga",
+ "nqt": "Nteng",
+ "nqy": "Akyaung Ari Naga",
+ "nr": "South Ndebele",
+ "nra": "Ngom",
+ "nrb": "Nara",
+ "nrc": "Noric",
+ "nre": "Southern Rengma Naga",
+ "nrf": "Jèrriais; Guernésiais",
+ "nrg": "Narango",
+ "nri": "Chokri Naga",
+ "nrk": "Ngarla",
+ "nrl": "Ngarluma",
+ "nrm": "Narom",
+ "nrn": "Norn",
+ "nrp": "North Picene",
+ "nrr": "Norra; Nora",
+ "nrt": "Northern Kalapuya",
+ "nru": "Narua",
+ "nrx": "Ngurmbur",
+ "nrz": "Lala",
+ "nsa": "Sangtam Naga",
+ "nsb": "Lower Nossob",
+ "nsc": "Nshi",
+ "nsd": "Southern Nisu",
+ "nse": "Nsenga",
+ "nsf": "Northwestern Nisu",
+ "nsg": "Ngasa",
+ "nsh": "Ngoshie",
+ "nsi": "Nigerian Sign Language",
+ "nsk": "Naskapi",
+ "nsl": "Norwegian Sign Language",
+ "nsm": "Sumi Naga",
+ "nsn": "Nehan",
+ "nso": "Pedi; Northern Sotho; Sepedi",
+ "nsp": "Nepalese Sign Language",
+ "nsq": "Northern Sierra Miwok",
+ "nsr": "Maritime Sign Language",
+ "nss": "Nali",
+ "nst": "Tase Naga",
+ "nsu": "Sierra Negra Nahuatl",
+ "nsv": "Southwestern Nisu",
+ "nsw": "Navut",
+ "nsx": "Nsongo",
+ "nsy": "Nasal",
+ "nsz": "Nisenan",
+ "ntd": "Northern Tidung",
+ "nte": "Nathembo",
+ "ntg": "Ngantangarra",
+ "nti": "Natioro",
+ "ntj": "Ngaanyatjarra",
+ "ntk": "Ikoma-Nata-Isenye",
+ "ntm": "Nateni",
+ "nto": "Ntomba",
+ "ntp": "Northern Tepehuan",
+ "ntr": "Delo",
+ "ntu": "Natügu",
+ "ntw": "Nottoway",
+ "ntx": "Tangkhul Naga (Myanmar)",
+ "nty": "Mantsi",
+ "ntz": "Natanzi",
+ "nua": "Yuanga",
+ "nub": "Nubian languages",
+ "nuc": "Nukuini",
+ "nud": "Ngala",
+ "nue": "Ngundu",
+ "nuf": "Nusu",
+ "nug": "Nungali",
+ "nuh": "Ndunda",
+ "nui": "Ngumbi",
+ "nuj": "Nyole",
+ "nuk": "Nuu-chah-nulth; Nuuchahnulth",
+ "nul": "Nusa Laut",
+ "num": "Niuafo'ou",
+ "nun": "Anong",
+ "nuo": "Nguôn",
+ "nup": "Nupe-Nupe-Tako",
+ "nuq": "Nukumanu",
+ "nur": "Nukuria",
+ "nus": "Nuer",
+ "nut": "Nung (Viet Nam)",
+ "nuu": "Ngbundu",
+ "nuv": "Northern Nuni",
+ "nuw": "Nguluwan",
+ "nux": "Mehek",
+ "nuy": "Nunggubuyu",
+ "nuz": "Tlamacazapa Nahuatl",
+ "nv": "Navajo; Navaho",
+ "nvh": "Nasarian",
+ "nvm": "Namiae",
+ "nvo": "Nyokon",
+ "nwa": "Nawathinehena",
+ "nwb": "Nyabwa",
+ "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari",
+ "nwe": "Ngwe",
+ "nwg": "Ngayawung",
+ "nwi": "Southwest Tanna",
+ "nwm": "Nyamusa-Molo",
+ "nwo": "Nauo",
+ "nwr": "Nawaru",
+ "nww": "Ndwewe",
+ "nwx": "Middle Newar",
+ "nwy": "Nottoway-Meherrin",
+ "nxa": "Nauete",
+ "nxd": "Ngando (Democratic Republic of Congo)",
+ "nxe": "Nage",
+ "nxg": "Ngad'a",
+ "nxi": "Nindi",
+ "nxk": "Koki Naga",
+ "nxl": "South Nuaulu",
+ "nxm": "Numidian",
+ "nxn": "Ngawun",
+ "nxo": "Ndambomo",
+ "nxq": "Naxi",
+ "nxr": "Ninggerum",
+ "nxx": "Nafri",
+ "ny": "Nyanja; Chewa; Chichewa",
+ "nyb": "Nyangbo",
+ "nyc": "Nyanga-li",
+ "nyd": "Nyore; Olunyole",
+ "nye": "Nyengo",
+ "nyf": "Giryama; Kigiryama",
+ "nyg": "Nyindu",
+ "nyh": "Nyikina",
+ "nyi": "Ama (Sudan)",
+ "nyj": "Nyanga",
+ "nyk": "Nyaneka",
+ "nyl": "Nyeu",
+ "nym": "Nyamwezi",
+ "nyn": "Nyankole",
+ "nyo": "Nyoro",
+ "nyp": "Nyang'i",
+ "nyq": "Nayini",
+ "nyr": "Nyiha (Malawi)",
+ "nys": "Nyungar",
+ "nyt": "Nyawaygi",
+ "nyu": "Nyungwe",
+ "nyv": "Nyulnyul",
+ "nyw": "Nyaw",
+ "nyx": "Nganyaywana",
+ "nyy": "Nyakyusa-Ngonde",
+ "nza": "Tigon Mbembe",
+ "nzb": "Njebi",
+ "nzd": "Nzadi",
+ "nzi": "Nzima",
+ "nzk": "Nzakara",
+ "nzm": "Zeme Naga",
+ "nzs": "New Zealand Sign Language",
+ "nzu": "Teke-Nzikou",
+ "nzy": "Nzakambay",
+ "nzz": "Nanga Dama Dogon",
+ "oaa": "Orok",
+ "oac": "Oroch",
+ "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)",
+ "oav": "Old Avar",
+ "obi": "Obispeño",
+ "obk": "Southern Bontok",
+ "obl": "Oblo",
+ "obm": "Moabite",
+ "obo": "Obo Manobo",
+ "obr": "Old Burmese",
+ "obt": "Old Breton",
+ "obu": "Obulom",
+ "oc": "Occitan (post 1500)",
+ "oca": "Ocaina",
+ "och": "Old Chinese",
+ "ocm": "Old Cham",
+ "oco": "Old Cornish",
+ "ocu": "Atzingo Matlatzinca",
+ "oda": "Odut",
+ "odk": "Od",
+ "odt": "Old Dutch",
+ "odu": "Odual",
+ "ofo": "Ofo",
+ "ofs": "Old Frisian",
+ "ofu": "Efutop",
+ "ogb": "Ogbia",
+ "ogc": "Ogbah",
+ "oge": "Old Georgian",
+ "ogg": "Ogbogolo",
+ "ogo": "Khana",
+ "ogu": "Ogbronuagum",
+ "oht": "Old Hittite",
+ "ohu": "Old Hungarian",
+ "oia": "Oirata",
+ "oie": "Okolie",
+ "oin": "Inebu One",
+ "oj": "Ojibwa",
+ "ojb": "Northwestern Ojibwa",
+ "ojc": "Central Ojibwa",
+ "ojg": "Eastern Ojibwa",
+ "ojp": "Old Japanese",
+ "ojs": "Severn Ojibwa",
+ "ojv": "Ontong Java",
+ "ojw": "Western Ojibwa",
+ "oka": "Okanagan",
+ "okb": "Okobo",
+ "okc": "Kobo",
+ "okd": "Okodia",
+ "oke": "Okpe (Southwestern Edo)",
+ "okg": "Koko Babangk",
+ "okh": "Koresh-e Rostam",
+ "oki": "Okiek",
+ "okj": "Oko-Juwoi",
+ "okk": "Kwamtim One",
+ "okl": "Old Kentish Sign Language",
+ "okm": "Middle Korean (10th-16th cent.)",
+ "okn": "Oki-No-Erabu",
+ "oko": "Old Korean (3rd-9th cent.)",
+ "okr": "Kirike",
+ "oks": "Oko-Eni-Osayen",
+ "oku": "Oku",
+ "okv": "Orokaiva",
+ "okx": "Okpe (Northwestern Edo)",
+ "okz": "Old Khmer",
+ "ola": "Walungge",
+ "old": "Mochi",
+ "ole": "Olekha",
+ "olk": "Olkol",
+ "olm": "Oloma",
+ "olo": "Livvi",
+ "olr": "Olrat",
+ "olt": "Old Lithuanian",
+ "olu": "Kuvale",
+ "om": "Oromo",
+ "oma": "Omaha-Ponca",
+ "omb": "East Ambae",
+ "omc": "Mochica",
+ "omg": "Omagua",
+ "omi": "Omi",
+ "omk": "Omok",
+ "oml": "Ombo",
+ "omn": "Minoan",
+ "omo": "Utarmbung",
+ "omp": "Old Manipuri",
+ "omq": "Oto-Manguean languages",
+ "omr": "Old Marathi",
+ "omt": "Omotik",
+ "omu": "Omurano",
+ "omv": "Omotic languages",
+ "omw": "South Tairora",
+ "omx": "Old Mon",
+ "omy": "Old Malay",
+ "ona": "Ona",
+ "onb": "Lingao",
+ "one": "Oneida",
+ "ong": "Olo",
+ "oni": "Onin",
+ "onj": "Onjob",
+ "onk": "Kabore One",
+ "onn": "Onobasulu",
+ "ono": "Onondaga",
+ "onp": "Sartang",
+ "onr": "Northern One",
+ "ons": "Ono",
+ "ont": "Ontenu",
+ "onu": "Unua",
+ "onw": "Old Nubian",
+ "onx": "Onin Based Pidgin",
+ "ood": "Tohono O'odham",
+ "oog": "Ong",
+ "oon": "Önge",
+ "oor": "Oorlams",
+ "oos": "Old Ossetic",
+ "opa": "Okpamheri",
+ "opk": "Kopkaka",
+ "opm": "Oksapmin",
+ "opo": "Opao",
+ "opt": "Opata",
+ "opy": "Ofayé",
+ "or": "Oriya (macrolanguage); Odia (macrolanguage)",
+ "ora": "Oroha",
+ "orc": "Orma",
+ "ore": "Orejón",
+ "org": "Oring",
+ "orh": "Oroqen",
+ "orn": "Orang Kanaq",
+ "oro": "Orokolo",
+ "orr": "Oruma",
+ "ors": "Orang Seletar",
+ "ort": "Adivasi Oriya",
+ "oru": "Ormuri",
+ "orv": "Old Russian",
+ "orw": "Oro Win",
+ "orx": "Oro",
+ "ory": "Odia (individual language); Oriya (individual language)",
+ "orz": "Ormu",
+ "os": "Ossetian; Ossetic",
+ "osa": "Osage",
+ "osc": "Oscan",
+ "osi": "Osing",
+ "osn": "Old Sundanese",
+ "oso": "Ososo",
+ "osp": "Old Spanish",
+ "ost": "Osatu",
+ "osu": "Southern One",
+ "osx": "Old Saxon",
+ "ota": "Ottoman Turkish (1500-1928)",
+ "otb": "Old Tibetan",
+ "otd": "Ot Danum",
+ "ote": "Mezquital Otomi",
+ "oti": "Oti",
+ "otk": "Old Turkish",
+ "otl": "Tilapa Otomi",
+ "otm": "Eastern Highland Otomi",
+ "otn": "Tenango Otomi",
+ "oto": "Otomian languages",
+ "otq": "Querétaro Otomi",
+ "otr": "Otoro",
+ "ots": "Estado de México Otomi",
+ "ott": "Temoaya Otomi",
+ "otu": "Otuke",
+ "otw": "Ottawa",
+ "otx": "Texcatepec Otomi",
+ "oty": "Old Tamil",
+ "otz": "Ixtenco Otomi",
+ "oua": "Tagargrent",
+ "oub": "Glio-Oubi",
+ "oue": "Oune",
+ "oui": "Old Uighur",
+ "oum": "Ouma",
+ "ovd": "Elfdalian; Övdalian",
+ "owi": "Owiniga",
+ "owl": "Old Welsh",
+ "oyb": "Oy",
+ "oyd": "Oyda",
+ "oym": "Wayampi",
+ "oyy": "Oya'oya",
+ "ozm": "Koonzime",
+ "pa": "Panjabi; Punjabi",
+ "paa": "Papuan languages",
+ "pab": "Parecís",
+ "pac": "Pacoh",
+ "pad": "Paumarí",
+ "pae": "Pagibete",
+ "paf": "Paranawát",
+ "pag": "Pangasinan",
+ "pah": "Tenharim",
+ "pai": "Pe",
+ "pak": "Parakanã",
+ "pal": "Pahlavi",
+ "pam": "Pampanga; Kapampangan",
+ "pao": "Northern Paiute",
+ "pap": "Papiamento",
+ "paq": "Parya",
+ "par": "Panamint; Timbisha",
+ "pas": "Papasena",
+ "pau": "Palauan",
+ "pav": "Pakaásnovos",
+ "paw": "Pawnee",
+ "pax": "Pankararé",
+ "pay": "Pech",
+ "paz": "Pankararú",
+ "pbb": "Páez",
+ "pbc": "Patamona",
+ "pbe": "Mezontla Popoloca",
+ "pbf": "Coyotepec Popoloca",
+ "pbg": "Paraujano",
+ "pbh": "E'ñapa Woromaipu",
+ "pbi": "Parkwa",
+ "pbl": "Mak (Nigeria)",
+ "pbm": "Puebla Mazatec",
+ "pbn": "Kpasam",
+ "pbo": "Papel",
+ "pbp": "Badyara",
+ "pbr": "Pangwa",
+ "pbs": "Central Pame",
+ "pbt": "Southern Pashto",
+ "pbu": "Northern Pashto",
+ "pbv": "Pnar",
+ "pby": "Pyu (Papua New Guinea)",
+ "pca": "Santa Inés Ahuatempan Popoloca",
+ "pcb": "Pear",
+ "pcc": "Bouyei",
+ "pcd": "Picard",
+ "pce": "Ruching Palaung",
+ "pcf": "Paliyan",
+ "pcg": "Paniya",
+ "pch": "Pardhan",
+ "pci": "Duruwa",
+ "pcj": "Parenga",
+ "pck": "Paite Chin",
+ "pcl": "Pardhi",
+ "pcm": "Nigerian Pidgin",
+ "pcn": "Piti",
+ "pcp": "Pacahuara",
+ "pcw": "Pyapun",
+ "pda": "Anam",
+ "pdc": "Pennsylvania German",
+ "pdi": "Pa Di",
+ "pdn": "Podena; Fedan",
+ "pdo": "Padoe",
+ "pdt": "Plautdietsch",
+ "pdu": "Kayan",
+ "pea": "Peranakan Indonesian",
+ "peb": "Eastern Pomo",
+ "ped": "Mala (Papua New Guinea)",
+ "pee": "Taje",
+ "pef": "Northeastern Pomo",
+ "peg": "Pengo",
+ "peh": "Bonan",
+ "pei": "Chichimeca-Jonaz",
+ "pej": "Northern Pomo",
+ "pek": "Penchal",
+ "pel": "Pekal",
+ "pem": "Phende",
+ "peo": "Old Persian (ca. 600-400 B.C.)",
+ "pep": "Kunja",
+ "peq": "Southern Pomo",
+ "pes": "Iranian Persian",
+ "pev": "Pémono",
+ "pex": "Petats",
+ "pey": "Petjo",
+ "pez": "Eastern Penan",
+ "pfa": "Pááfang",
+ "pfe": "Pere",
+ "pfl": "Pfaelzisch",
+ "pga": "Sudanese Creole Arabic",
+ "pgd": "Gāndhārī",
+ "pgg": "Pangwali",
+ "pgi": "Pagi",
+ "pgk": "Rerep",
+ "pgl": "Primitive Irish",
+ "pgn": "Paelignian",
+ "pgs": "Pangseng",
+ "pgu": "Pagu",
+ "pgz": "Papua New Guinean Sign Language",
+ "pha": "Pa-Hng",
+ "phd": "Phudagi",
+ "phg": "Phuong",
+ "phh": "Phukha",
+ "phi": "Philippine languages",
+ "phj": "Pahari",
+ "phk": "Phake",
+ "phl": "Phalura; Palula",
+ "phm": "Phimbi",
+ "phn": "Phoenician",
+ "pho": "Phunoi",
+ "phq": "Phana'",
+ "phr": "Pahari-Potwari",
+ "pht": "Phu Thai",
+ "phu": "Phuan",
+ "phv": "Pahlavani",
+ "phw": "Phangduwali",
+ "pi": "Pali",
+ "pia": "Pima Bajo",
+ "pib": "Yine",
+ "pic": "Pinji",
+ "pid": "Piaroa",
+ "pie": "Piro",
+ "pif": "Pingelapese",
+ "pig": "Pisabo",
+ "pih": "Pitcairn-Norfolk",
+ "pij": "Pijao",
+ "pil": "Yom",
+ "pim": "Powhatan",
+ "pin": "Piame",
+ "pio": "Piapoco",
+ "pip": "Pero",
+ "pir": "Piratapuyo",
+ "pis": "Pijin",
+ "pit": "Pitta Pitta",
+ "piu": "Pintupi-Luritja",
+ "piv": "Pileni; Vaeakau-Taumako",
+ "piw": "Pimbwe",
+ "pix": "Piu",
+ "piy": "Piya-Kwonci",
+ "piz": "Pije",
+ "pjt": "Pitjantjatjara",
+ "pka": "Ardhamāgadhī Prākrit",
+ "pkb": "Pokomo; Kipfokomo",
+ "pkc": "Paekche",
+ "pkg": "Pak-Tong",
+ "pkh": "Pankhu",
+ "pkn": "Pakanha",
+ "pko": "Pökoot",
+ "pkp": "Pukapuka",
+ "pkr": "Attapady Kurumba",
+ "pks": "Pakistan Sign Language",
+ "pkt": "Maleng",
+ "pku": "Paku",
+ "pl": "Polish",
+ "pla": "Miani",
+ "plb": "Polonombauk",
+ "plc": "Central Palawano",
+ "pld": "Polari",
+ "ple": "Palu'e",
+ "plf": "Central Malayo-Polynesian languages",
+ "plg": "Pilagá",
+ "plh": "Paulohi",
+ "plj": "Polci",
+ "plk": "Kohistani Shina",
+ "pll": "Shwe Palaung",
+ "pln": "Palenquero",
+ "plo": "Oluta Popoluca",
+ "plq": "Palaic",
+ "plr": "Palaka Senoufo",
+ "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca",
+ "plt": "Plateau Malagasy",
+ "plu": "Palikúr",
+ "plv": "Southwest Palawano",
+ "plw": "Brooke's Point Palawano",
+ "ply": "Bolyu",
+ "plz": "Paluan",
+ "pma": "Paama",
+ "pmb": "Pambia",
+ "pmd": "Pallanganmiddang",
+ "pme": "Pwaamei",
+ "pmf": "Pamona",
+ "pmh": "Māhārāṣṭri Prākrit",
+ "pmi": "Northern Pumi",
+ "pmj": "Southern Pumi",
+ "pmk": "Pamlico",
+ "pml": "Lingua Franca",
+ "pmm": "Pomo",
+ "pmn": "Pam",
+ "pmo": "Pom",
+ "pmq": "Northern Pame",
+ "pmr": "Paynamar",
+ "pms": "Piemontese",
+ "pmt": "Tuamotuan",
+ "pmw": "Plains Miwok",
+ "pmx": "Poumei Naga",
+ "pmy": "Papuan Malay",
+ "pmz": "Southern Pame",
+ "pna": "Punan Bah-Biau",
+ "pnb": "Western Panjabi",
+ "pnc": "Pannei",
+ "pnd": "Mpinda",
+ "pne": "Western Penan",
+ "png": "Pangu; Pongu",
+ "pnh": "Penrhyn",
+ "pni": "Aoheng",
+ "pnj": "Pinjarup",
+ "pnk": "Paunaka",
+ "pnl": "Paleni",
+ "pnm": "Punan Batu 1",
+ "pnn": "Pinai-Hagahai",
+ "pno": "Panobo",
+ "pnp": "Pancana",
+ "pnq": "Pana (Burkina Faso)",
+ "pnr": "Panim",
+ "pns": "Ponosakan",
+ "pnt": "Pontic",
+ "pnu": "Jiongnai Bunu",
+ "pnv": "Pinigura",
+ "pnw": "Banyjima; Panytyima",
+ "pnx": "Phong-Kniang",
+ "pny": "Pinyin",
+ "pnz": "Pana (Central African Republic)",
+ "poc": "Poqomam",
+ "poe": "San Juan Atzingo Popoloca",
+ "pof": "Poke",
+ "pog": "Potiguára",
+ "poh": "Poqomchi'",
+ "poi": "Highland Popoluca",
+ "pok": "Pokangá",
+ "pom": "Southeastern Pomo",
+ "pon": "Pohnpeian",
+ "poo": "Central Pomo",
+ "pop": "Pwapwâ",
+ "poq": "Texistepec Popoluca",
+ "pos": "Sayula Popoluca",
+ "pot": "Potawatomi",
+ "pov": "Upper Guinea Crioulo",
+ "pow": "San Felipe Otlaltepec Popoloca",
+ "pox": "Polabian",
+ "poy": "Pogolo",
+ "poz": "Malayo-Polynesian languages",
+ "ppe": "Papi",
+ "ppi": "Paipai",
+ "ppk": "Uma",
+ "ppl": "Pipil; Nicarao",
+ "ppm": "Papuma",
+ "ppn": "Papapana",
+ "ppo": "Folopa",
+ "ppp": "Pelende",
+ "ppq": "Pei",
+ "pps": "San Luís Temalacayuca Popoloca",
+ "ppt": "Pare",
+ "ppu": "Papora",
+ "pqa": "Pa'a",
+ "pqe": "Eastern Malayo-Polynesian languages",
+ "pqm": "Malecite-Passamaquoddy",
+ "pqw": "Western Malayo-Polynesian languages",
+ "pra": "Prakrit languages",
+ "prc": "Parachi",
+ "prd": "Parsi-Dari",
+ "pre": "Principense",
+ "prf": "Paranan",
+ "prg": "Prussian",
+ "prh": "Porohanon",
+ "pri": "Paicî",
+ "prk": "Parauk",
+ "prl": "Peruvian Sign Language",
+ "prm": "Kibiri",
+ "prn": "Prasuni",
+ "pro": "Old Provençal (to 1500); Old Occitan (to 1500)",
+ "prp": "Parsi",
+ "prq": "Ashéninka Perené",
+ "prr": "Puri",
+ "prs": "Dari; Afghan Persian",
+ "prt": "Phai",
+ "pru": "Puragi",
+ "prw": "Parawen",
+ "prx": "Purik",
+ "prz": "Providencia Sign Language",
+ "ps": "Pushto; Pashto",
+ "psa": "Asue Awyu",
+ "psc": "Iranian Sign Language; Persian Sign Language",
+ "psd": "Plains Indian Sign Language",
+ "pse": "Central Malay",
+ "psg": "Penang Sign Language",
+ "psh": "Southwest Pashai; Southwest Pashayi",
+ "psi": "Southeast Pashai; Southeast Pashayi",
+ "psl": "Puerto Rican Sign Language",
+ "psm": "Pauserna",
+ "psn": "Panasuan",
+ "pso": "Polish Sign Language",
+ "psp": "Philippine Sign Language",
+ "psq": "Pasi",
+ "psr": "Portuguese Sign Language",
+ "pss": "Kaulong",
+ "pst": "Central Pashto",
+ "psu": "Sauraseni Prākrit",
+ "psw": "Port Sandwich",
+ "psy": "Piscataway",
+ "pt": "Portuguese",
+ "pta": "Pai Tavytera",
+ "pth": "Pataxó Hã-Ha-Hãe",
+ "pti": "Pindiini; Wangkatha",
+ "ptn": "Patani",
+ "pto": "Zo'é",
+ "ptp": "Patep",
+ "ptq": "Pattapu",
+ "ptr": "Piamatsina",
+ "ptt": "Enrekang",
+ "ptu": "Bambam",
+ "ptv": "Port Vato",
+ "ptw": "Pentlatch",
+ "pty": "Pathiya",
+ "pua": "Western Highland Purepecha",
+ "pub": "Purum",
+ "puc": "Punan Merap",
+ "pud": "Punan Aput",
+ "pue": "Puelche",
+ "puf": "Punan Merah",
+ "pug": "Phuie",
+ "pui": "Puinave",
+ "puj": "Punan Tubu",
+ "pum": "Puma",
+ "puo": "Puoc",
+ "pup": "Pulabu",
+ "puq": "Puquina",
+ "pur": "Puruborá",
+ "put": "Putoh",
+ "puu": "Punu",
+ "puw": "Puluwatese",
+ "pux": "Puare",
+ "puy": "Purisimeño",
+ "pwa": "Pawaia",
+ "pwb": "Panawa",
+ "pwg": "Gapapaiwa",
+ "pwi": "Patwin",
+ "pwm": "Molbog",
+ "pwn": "Paiwan",
+ "pwo": "Pwo Western Karen",
+ "pwr": "Powari",
+ "pww": "Pwo Northern Karen",
+ "pxm": "Quetzaltepec Mixe",
+ "pye": "Pye Krumen",
+ "pym": "Fyam",
+ "pyn": "Poyanáwa",
+ "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay",
+ "pyu": "Puyuma",
+ "pyx": "Pyu (Myanmar)",
+ "pyy": "Pyen",
+ "pzh": "Pazeh",
+ "pzn": "Jejara Naga; Para Naga",
+ "qu": "Quechua",
+ "qua": "Quapaw",
+ "qub": "Huallaga Huánuco Quechua",
+ "quc": "K'iche'; Quiché",
+ "qud": "Calderón Highland Quichua",
+ "quf": "Lambayeque Quechua",
+ "qug": "Chimborazo Highland Quichua",
+ "quh": "South Bolivian Quechua",
+ "qui": "Quileute",
+ "quk": "Chachapoyas Quechua",
+ "qul": "North Bolivian Quechua",
+ "qum": "Sipacapense",
+ "qun": "Quinault",
+ "qup": "Southern Pastaza Quechua",
+ "quq": "Quinqui",
+ "qur": "Yanahuanca Pasco Quechua",
+ "qus": "Santiago del Estero Quichua",
+ "quv": "Sacapulteco",
+ "quw": "Tena Lowland Quichua",
+ "qux": "Yauyos Quechua",
+ "quy": "Ayacucho Quechua",
+ "quz": "Cusco Quechua",
+ "qva": "Ambo-Pasco Quechua",
+ "qvc": "Cajamarca Quechua",
+ "qve": "Eastern Apurímac Quechua",
+ "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua",
+ "qvi": "Imbabura Highland Quichua",
+ "qvj": "Loja Highland Quichua",
+ "qvl": "Cajatambo North Lima Quechua",
+ "qvm": "Margos-Yarowilca-Lauricocha Quechua",
+ "qvn": "North Junín Quechua",
+ "qvo": "Napo Lowland Quechua",
+ "qvp": "Pacaraos Quechua",
+ "qvs": "San Martín Quechua",
+ "qvw": "Huaylla Wanca Quechua",
+ "qvy": "Queyu",
+ "qvz": "Northern Pastaza Quichua",
+ "qwa": "Corongo Ancash Quechua",
+ "qwc": "Classical Quechua",
+ "qwe": "Quechuan (family)",
+ "qwh": "Huaylas Ancash Quechua",
+ "qwm": "Kuman (Russia)",
+ "qws": "Sihuas Ancash Quechua",
+ "qwt": "Kwalhioqua-Tlatskanai",
+ "qxa": "Chiquián Ancash Quechua",
+ "qxc": "Chincha Quechua",
+ "qxh": "Panao Huánuco Quechua",
+ "qxl": "Salasaca Highland Quichua",
+ "qxn": "Northern Conchucos Ancash Quechua",
+ "qxo": "Southern Conchucos Ancash Quechua",
+ "qxp": "Puno Quechua",
+ "qxq": "Qashqa'i",
+ "qxr": "Cañar Highland Quichua",
+ "qxs": "Southern Qiang",
+ "qxt": "Santa Ana de Tusi Pasco Quechua",
+ "qxu": "Arequipa-La Unión Quechua",
+ "qxw": "Jauja Wanca Quechua",
+ "qya": "Quenya",
+ "qyp": "Quiripi",
+ "raa": "Dungmali",
+ "rab": "Camling",
+ "rac": "Rasawa",
+ "rad": "Rade",
+ "raf": "Western Meohang",
+ "rag": "Logooli; Lulogooli",
+ "rah": "Rabha",
+ "rai": "Ramoaaina",
+ "raj": "Rajasthani",
+ "rak": "Tulu-Bohuai",
+ "ral": "Ralte",
+ "ram": "Canela",
+ "ran": "Riantana",
+ "rao": "Rao",
+ "rap": "Rapanui",
+ "raq": "Saam",
+ "rar": "Rarotongan; Cook Islands Maori",
+ "ras": "Tegali",
+ "rat": "Razajerdi",
+ "rau": "Raute",
+ "rav": "Sampang",
+ "raw": "Rawang",
+ "rax": "Rang",
+ "ray": "Rapa",
+ "raz": "Rahambuu",
+ "rbb": "Rumai Palaung",
+ "rbk": "Northern Bontok",
+ "rbl": "Miraya Bikol",
+ "rbp": "Barababaraba",
+ "rcf": "Réunion Creole French",
+ "rdb": "Rudbari",
+ "rea": "Rerau",
+ "reb": "Rembong",
+ "ree": "Rejang Kayan",
+ "reg": "Kara (Tanzania)",
+ "rei": "Reli",
+ "rej": "Rejang",
+ "rel": "Rendille",
+ "rem": "Remo",
+ "ren": "Rengao",
+ "rer": "Rer Bare",
+ "res": "Reshe",
+ "ret": "Retta",
+ "rey": "Reyesano",
+ "rga": "Roria",
+ "rge": "Romano-Greek",
+ "rgk": "Rangkas",
+ "rgn": "Romagnol",
+ "rgr": "Resígaro",
+ "rgs": "Southern Roglai",
+ "rgu": "Ringgou",
+ "rhg": "Rohingya",
+ "rhp": "Yahang",
+ "ria": "Riang (India)",
+ "rib": "Bribri Sign Language",
+ "rif": "Tarifit",
+ "ril": "Riang Lang; Riang (Myanmar)",
+ "rim": "Nyaturu",
+ "rin": "Nungu",
+ "rir": "Ribun",
+ "rit": "Ritharrngu",
+ "riu": "Riung",
+ "rjg": "Rajong",
+ "rji": "Raji",
+ "rjs": "Rajbanshi",
+ "rka": "Kraol",
+ "rkb": "Rikbaktsa",
+ "rkh": "Rakahanga-Manihiki",
+ "rki": "Rakhine",
+ "rkm": "Marka",
+ "rkt": "Rangpuri; Kamta",
+ "rkw": "Arakwal",
+ "rm": "Romansh",
+ "rma": "Rama",
+ "rmb": "Rembarrnga",
+ "rmc": "Carpathian Romani",
+ "rmd": "Traveller Danish",
+ "rme": "Angloromani",
+ "rmf": "Kalo Finnish Romani",
+ "rmg": "Traveller Norwegian",
+ "rmh": "Murkim",
+ "rmi": "Lomavren",
+ "rmk": "Romkun",
+ "rml": "Baltic Romani",
+ "rmm": "Roma",
+ "rmn": "Balkan Romani",
+ "rmo": "Sinte Romani",
+ "rmp": "Rempi",
+ "rmq": "Caló",
+ "rms": "Romanian Sign Language",
+ "rmt": "Domari",
+ "rmu": "Tavringer Romani",
+ "rmv": "Romanova",
+ "rmw": "Welsh Romani",
+ "rmx": "Romam",
+ "rmy": "Vlax Romani",
+ "rmz": "Marma",
+ "rn": "Rundi",
+ "rnb": "Brunca Sign Language",
+ "rnd": "Ruund",
+ "rng": "Ronga",
+ "rnl": "Ranglong",
+ "rnn": "Roon",
+ "rnp": "Rongpo",
+ "rnr": "Nari Nari",
+ "rnw": "Rungwa",
+ "ro": "Romanian; Moldavian; Moldovan",
+ "roa": "Romance languages",
+ "rob": "Tae'",
+ "roc": "Cacgia Roglai",
+ "rod": "Rogo",
+ "roe": "Ronji",
+ "rof": "Rombo",
+ "rog": "Northern Roglai",
+ "rol": "Romblomanon",
+ "rom": "Romany",
+ "roo": "Rotokas",
+ "rop": "Kriol",
+ "ror": "Rongga",
+ "rou": "Runga",
+ "row": "Dela-Oenale",
+ "rpn": "Repanbitip",
+ "rpt": "Rapting",
+ "rri": "Ririo",
+ "rro": "Waima",
+ "rrt": "Arritinngithigh",
+ "rsb": "Romano-Serbian",
+ "rsk": "Ruthenian; Rusyn",
+ "rsl": "Russian Sign Language",
+ "rsm": "Miriwoong Sign Language",
+ "rsn": "Rwandan Sign Language",
+ "rtc": "Rungtu Chin",
+ "rth": "Ratahan",
+ "rtm": "Rotuman",
+ "rts": "Yurats",
+ "rtw": "Rathawi",
+ "ru": "Russian",
+ "rub": "Gungu",
+ "ruc": "Ruuli",
+ "rue": "Rusyn",
+ "ruf": "Luguru",
+ "rug": "Roviana",
+ "ruh": "Ruga",
+ "rui": "Rufiji",
+ "ruk": "Che",
+ "ruo": "Istro Romanian",
+ "rup": "Macedo-Romanian; Aromanian; Arumanian",
+ "ruq": "Megleno Romanian",
+ "rut": "Rutul",
+ "ruu": "Lanas Lobu",
+ "ruy": "Mala (Nigeria)",
+ "ruz": "Ruma",
+ "rw": "Kinyarwanda",
+ "rwa": "Rawo",
+ "rwk": "Rwa",
+ "rwl": "Ruwila",
+ "rwm": "Amba (Uganda)",
+ "rwo": "Rawa",
+ "rwr": "Marwari (India)",
+ "rxd": "Ngardi",
+ "rxw": "Karuwali; Garuwali",
+ "ryn": "Northern Amami-Oshima",
+ "rys": "Yaeyama",
+ "ryu": "Central Okinawan",
+ "rzh": "Rāziḥī",
+ "sa": "Sanskrit",
+ "saa": "Saba",
+ "sab": "Buglere",
+ "sac": "Meskwaki",
+ "sad": "Sandawe",
+ "sae": "Sabanê",
+ "saf": "Safaliba",
+ "sah": "Yakut",
+ "sai": "South American Indian languages",
+ "saj": "Sahu",
+ "sak": "Sake",
+ "sal": "Salishan languages",
+ "sam": "Samaritan Aramaic",
+ "sao": "Sause",
+ "saq": "Samburu",
+ "sar": "Saraveca",
+ "sas": "Sasak",
+ "sat": "Santali",
+ "sau": "Saleman",
+ "sav": "Saafi-Saafi",
+ "saw": "Sawi",
+ "sax": "Sa",
+ "say": "Saya",
+ "saz": "Saurashtra",
+ "sba": "Ngambay",
+ "sbb": "Simbo",
+ "sbc": "Kele (Papua New Guinea)",
+ "sbd": "Southern Samo",
+ "sbe": "Saliba",
+ "sbf": "Chabu; Shabo",
+ "sbg": "Seget",
+ "sbh": "Sori-Harengan",
+ "sbi": "Seti",
+ "sbj": "Surbakhal",
+ "sbk": "Safwa",
+ "sbl": "Botolan Sambal",
+ "sbm": "Sagala",
+ "sbn": "Sindhi Bhil",
+ "sbo": "Sabüm",
+ "sbp": "Sangu (Tanzania)",
+ "sbq": "Sileibi",
+ "sbr": "Sembakung Murut",
+ "sbs": "Subiya",
+ "sbt": "Kimki",
+ "sbu": "Stod Bhoti",
+ "sbv": "Sabine",
+ "sbw": "Simba",
+ "sbx": "Seberuang",
+ "sby": "Soli",
+ "sbz": "Sara Kaba",
+ "sc": "Sardinian",
+ "scb": "Chut",
+ "sce": "Dongxiang",
+ "scf": "San Miguel Creole French",
+ "scg": "Sanggau",
+ "sch": "Sakachep",
+ "sci": "Sri Lankan Creole Malay",
+ "sck": "Sadri",
+ "scl": "Shina",
+ "scn": "Sicilian",
+ "sco": "Scots",
+ "scp": "Hyolmo; Helambu Sherpa",
+ "scq": "Sa'och",
+ "scs": "North Slavey",
+ "sct": "Southern Katang",
+ "scu": "Shumcho",
+ "scv": "Sheni",
+ "scw": "Sha",
+ "scx": "Sicel",
+ "sd": "Sindhi",
+ "sda": "Toraja-Sa'dan",
+ "sdb": "Shabak",
+ "sdc": "Sassarese Sardinian",
+ "sde": "Surubu",
+ "sdf": "Sarli",
+ "sdg": "Savi",
+ "sdh": "Southern Kurdish",
+ "sdj": "Suundi",
+ "sdk": "Sos Kundi",
+ "sdl": "Saudi Arabian Sign Language",
+ "sdn": "Gallurese Sardinian",
+ "sdo": "Bukar-Sadung Bidayuh",
+ "sdp": "Sherdukpen",
+ "sdq": "Semandang",
+ "sdr": "Oraon Sadri",
+ "sds": "Sened",
+ "sdt": "Shuadit",
+ "sdu": "Sarudu",
+ "sdv": "Eastern Sudanic languages",
+ "sdx": "Sibu Melanau",
+ "sdz": "Sallands",
+ "se": "Northern Sami",
+ "sea": "Semai",
+ "seb": "Shempire Senoufo",
+ "sec": "Sechelt",
+ "sed": "Sedang",
+ "see": "Seneca",
+ "sef": "Cebaara Senoufo",
+ "seg": "Segeju",
+ "seh": "Sena",
+ "sei": "Seri",
+ "sej": "Sene",
+ "sek": "Sekani",
+ "sel": "Selkup",
+ "sem": "Semitic languages",
+ "sen": "Nanerigé Sénoufo",
+ "seo": "Suarmin",
+ "sep": "Sìcìté Sénoufo",
+ "seq": "Senara Sénoufo",
+ "ser": "Serrano",
+ "ses": "Koyraboro Senni Songhai",
+ "set": "Sentani",
+ "seu": "Serui-Laut",
+ "sev": "Nyarafolo Senoufo",
+ "sew": "Sewa Bay",
+ "sey": "Secoya",
+ "sez": "Senthang Chin",
+ "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language",
+ "sfe": "Eastern Subanen",
+ "sfm": "Small Flowery Miao",
+ "sfs": "South African Sign Language",
+ "sfw": "Sehwi",
+ "sg": "Sango",
+ "sga": "Old Irish (to 900)",
+ "sgb": "Mag-antsi Ayta",
+ "sgc": "Kipsigis",
+ "sgd": "Surigaonon",
+ "sge": "Segai",
+ "sgg": "Swiss-German Sign Language",
+ "sgh": "Shughni",
+ "sgi": "Suga",
+ "sgj": "Surgujia",
+ "sgk": "Sangkong",
+ "sgm": "Singa",
+ "sgn": "Sign languages",
+ "sgp": "Singpho",
+ "sgr": "Sangisari",
+ "sgs": "Samogitian",
+ "sgt": "Brokpake",
+ "sgu": "Salas",
+ "sgw": "Sebat Bet Gurage",
+ "sgx": "Sierra Leone Sign Language",
+ "sgy": "Sanglechi",
+ "sgz": "Sursurunga",
+ "sh": "Serbo-Croatian",
+ "sha": "Shall-Zwall",
+ "shb": "Ninam",
+ "shc": "Sonde",
+ "shd": "Kundal Shahi",
+ "she": "Sheko",
+ "shg": "Shua",
+ "shh": "Shoshoni",
+ "shi": "Tachelhit",
+ "shj": "Shatt",
+ "shk": "Shilluk",
+ "shl": "Shendu",
+ "shm": "Shahrudi",
+ "shn": "Shan",
+ "sho": "Shanga",
+ "shp": "Shipibo-Conibo",
+ "shq": "Sala",
+ "shr": "Shi",
+ "shs": "Shuswap",
+ "sht": "Shasta",
+ "shu": "Chadian Arabic",
+ "shv": "Shehri",
+ "shw": "Shwai",
+ "shx": "She",
+ "shy": "Tachawit",
+ "shz": "Syenara Senoufo",
+ "si": "Sinhala; Sinhalese",
+ "sia": "Akkala Sami",
+ "sib": "Sebop",
+ "sid": "Sidamo",
+ "sie": "Simaa",
+ "sif": "Siamou",
+ "sig": "Paasaal",
+ "sih": "Zire; Sîshëë",
+ "sii": "Shom Peng",
+ "sij": "Numbami",
+ "sik": "Sikiana",
+ "sil": "Tumulung Sisaala",
+ "sim": "Mende (Papua New Guinea)",
+ "sio": "Siouan languages",
+ "sip": "Sikkimese",
+ "siq": "Sonia",
+ "sir": "Siri",
+ "sis": "Siuslaw",
+ "sit": "Sino-Tibetan languages",
+ "siu": "Sinagen",
+ "siv": "Sumariup",
+ "siw": "Siwai",
+ "six": "Sumau",
+ "siy": "Sivandi",
+ "siz": "Siwi",
+ "sja": "Epena",
+ "sjb": "Sajau Basap",
+ "sjd": "Kildin Sami",
+ "sje": "Pite Sami",
+ "sjg": "Assangori",
+ "sjk": "Kemi Sami",
+ "sjl": "Sajalong; Miji",
+ "sjm": "Mapun",
+ "sjn": "Sindarin",
+ "sjo": "Xibe",
+ "sjp": "Surjapuri",
+ "sjr": "Siar-Lak",
+ "sjs": "Senhaja De Srair",
+ "sjt": "Ter Sami",
+ "sju": "Ume Sami",
+ "sjw": "Shawnee",
+ "sk": "Slovak",
+ "ska": "Skagit",
+ "skb": "Saek",
+ "skc": "Ma Manda",
+ "skd": "Southern Sierra Miwok",
+ "ske": "Seke (Vanuatu)",
+ "skf": "Sakirabiá",
+ "skg": "Sakalava Malagasy",
+ "skh": "Sikule",
+ "ski": "Sika",
+ "skj": "Seke (Nepal)",
+ "skm": "Kutong",
+ "skn": "Kolibugan Subanon",
+ "sko": "Seko Tengah",
+ "skp": "Sekapan",
+ "skq": "Sininkere",
+ "skr": "Saraiki; Seraiki",
+ "sks": "Maia",
+ "skt": "Sakata",
+ "sku": "Sakao",
+ "skv": "Skou",
+ "skw": "Skepi Creole Dutch",
+ "skx": "Seko Padang",
+ "sky": "Sikaiana",
+ "skz": "Sekar",
+ "sl": "Slovenian",
+ "sla": "Slavic languages",
+ "slc": "Sáliba",
+ "sld": "Sissala",
+ "sle": "Sholaga",
+ "slf": "Swiss-Italian Sign Language",
+ "slg": "Selungai Murut",
+ "slh": "Southern Puget Sound Salish",
+ "sli": "Lower Silesian",
+ "slj": "Salumá",
+ "sll": "Salt-Yui",
+ "slm": "Pangutaran Sama",
+ "sln": "Salinan",
+ "slp": "Lamaholot",
+ "slq": "Salchuq",
+ "slr": "Salar",
+ "sls": "Singapore Sign Language",
+ "slt": "Sila",
+ "slu": "Selaru",
+ "slw": "Sialum",
+ "slx": "Salampasu",
+ "sly": "Selayar",
+ "slz": "Ma'ya",
+ "sm": "Samoan",
+ "sma": "Southern Sami",
+ "smb": "Simbari",
+ "smc": "Som",
+ "smf": "Auwe",
+ "smg": "Simbali",
+ "smh": "Samei",
+ "smi": "Sami languages",
+ "smj": "Lule Sami",
+ "smk": "Bolinao",
+ "sml": "Central Sama",
+ "smm": "Musasa",
+ "smn": "Inari Sami",
+ "smp": "Samaritan",
+ "smq": "Samo",
+ "smr": "Simeulue",
+ "sms": "Skolt Sami",
+ "smt": "Simte",
+ "smu": "Somray",
+ "smv": "Samvedi",
+ "smw": "Sumbawa",
+ "smx": "Samba",
+ "smy": "Semnani",
+ "smz": "Simeku",
+ "sn": "Shona",
+ "snc": "Sinaugoro",
+ "sne": "Bau Bidayuh",
+ "snf": "Noon",
+ "sng": "Sanga (Democratic Republic of Congo)",
+ "sni": "Sensi",
+ "snj": "Riverain Sango",
+ "snk": "Soninke",
+ "snl": "Sangil",
+ "snm": "Southern Ma'di",
+ "snn": "Siona",
+ "sno": "Snohomish",
+ "snp": "Siane",
+ "snq": "Sangu (Gabon)",
+ "snr": "Sihan",
+ "sns": "South West Bay; Nahavaq",
+ "snu": "Senggi; Viid",
+ "snv": "Sa'ban",
+ "snw": "Selee",
+ "snx": "Sam",
+ "sny": "Saniyo-Hiyewe",
+ "snz": "Kou",
+ "so": "Somali",
+ "soa": "Thai Song",
+ "sob": "Sobei",
+ "soc": "So (Democratic Republic of Congo)",
+ "sod": "Songoora",
+ "soe": "Songomeno",
+ "sog": "Sogdian",
+ "soh": "Aka",
+ "soi": "Sonha",
+ "soj": "Soi",
+ "sok": "Sokoro",
+ "sol": "Solos",
+ "son": "Songhai languages",
+ "soo": "Songo",
+ "sop": "Songe",
+ "soq": "Kanasi",
+ "sor": "Somrai",
+ "sos": "Seeku",
+ "sou": "Southern Thai",
+ "sov": "Sonsorol",
+ "sow": "Sowanda",
+ "sox": "Swo",
+ "soy": "Miyobe",
+ "soz": "Temi",
+ "spb": "Sepa (Indonesia)",
+ "spc": "Sapé",
+ "spd": "Saep",
+ "spe": "Sepa (Papua New Guinea)",
+ "spg": "Sian",
+ "spi": "Saponi",
+ "spk": "Sengo",
+ "spl": "Selepet",
+ "spm": "Akukem",
+ "spn": "Sanapaná",
+ "spo": "Spokane",
+ "spp": "Supyire Senoufo",
+ "spq": "Loreto-Ucayali Spanish",
+ "spr": "Saparua",
+ "sps": "Saposa",
+ "spt": "Spiti Bhoti",
+ "spu": "Sapuan",
+ "spv": "Sambalpuri; Kosli",
+ "spx": "South Picene",
+ "spy": "Sabaot",
+ "sq": "Albanian",
+ "sqa": "Shama-Sambuga",
+ "sqh": "Shau",
+ "sqj": "Albanian languages",
+ "sqk": "Albanian Sign Language",
+ "sqm": "Suma",
+ "sqn": "Susquehannock",
+ "sqo": "Sorkhei",
+ "sqq": "Sou",
+ "sqr": "Siculo Arabic",
+ "sqs": "Sri Lankan Sign Language",
+ "sqt": "Soqotri",
+ "squ": "Squamish",
+ "sqx": "Kufr Qassem Sign Language (KQSL)",
+ "sr": "Serbian",
+ "sra": "Saruga",
+ "srb": "Sora",
+ "src": "Logudorese Sardinian",
+ "sre": "Sara",
+ "srf": "Nafi",
+ "srg": "Sulod",
+ "srh": "Sarikoli",
+ "sri": "Siriano",
+ "srk": "Serudung Murut",
+ "srl": "Isirawa",
+ "srm": "Saramaccan",
+ "srn": "Sranan Tongo",
+ "sro": "Campidanese Sardinian",
+ "srq": "Sirionó",
+ "srr": "Serer",
+ "srs": "Sarsi",
+ "srt": "Sauri",
+ "sru": "Suruí",
+ "srv": "Southern Sorsoganon",
+ "srw": "Serua",
+ "srx": "Sirmauri",
+ "sry": "Sera",
+ "srz": "Shahmirzadi",
+ "ss": "Swati",
+ "ssa": "Nilo-Saharan languages",
+ "ssb": "Southern Sama",
+ "ssc": "Suba-Simbiti",
+ "ssd": "Siroi",
+ "sse": "Balangingi; Bangingih Sama",
+ "ssf": "Thao",
+ "ssg": "Seimat",
+ "ssh": "Shihhi Arabic",
+ "ssi": "Sansi",
+ "ssj": "Sausi",
+ "ssk": "Sunam",
+ "ssl": "Western Sisaala",
+ "ssm": "Semnam",
+ "ssn": "Waata",
+ "sso": "Sissano",
+ "ssp": "Spanish Sign Language",
+ "ssq": "So'a",
+ "ssr": "Swiss-French Sign Language",
+ "sss": "Sô",
+ "sst": "Sinasina",
+ "ssu": "Susuami",
+ "ssv": "Shark Bay",
+ "ssx": "Samberigi",
+ "ssy": "Saho",
+ "ssz": "Sengseng",
+ "st": "Southern Sotho",
+ "sta": "Settla",
+ "stb": "Northern Subanen",
+ "std": "Sentinel",
+ "ste": "Liana-Seti",
+ "stf": "Seta",
+ "stg": "Trieng",
+ "sth": "Shelta",
+ "sti": "Bulo Stieng",
+ "stj": "Matya Samo",
+ "stk": "Arammba",
+ "stl": "Stellingwerfs",
+ "stm": "Setaman",
+ "stn": "Owa",
+ "sto": "Stoney",
+ "stp": "Southeastern Tepehuan",
+ "stq": "Saterfriesisch",
+ "str": "Straits Salish",
+ "sts": "Shumashti",
+ "stt": "Budeh Stieng",
+ "stu": "Samtao",
+ "stv": "Silt'e",
+ "stw": "Satawalese",
+ "sty": "Siberian Tatar",
+ "su": "Sundanese",
+ "sua": "Sulka",
+ "sub": "Suku",
+ "suc": "Western Subanon",
+ "sue": "Suena",
+ "sug": "Suganga",
+ "sui": "Suki",
+ "suj": "Shubi",
+ "suk": "Sukuma",
+ "suo": "Bouni",
+ "suq": "Tirmaga-Chai Suri; Suri",
+ "sur": "Mwaghavul",
+ "sus": "Susu",
+ "sut": "Subtiaba",
+ "suv": "Puroik",
+ "suw": "Sumbwa",
+ "sux": "Sumerian",
+ "suy": "Suyá",
+ "suz": "Sunwar",
+ "sv": "Swedish",
+ "sva": "Svan",
+ "svb": "Ulau-Suain",
+ "svc": "Vincentian Creole English",
+ "sve": "Serili",
+ "svk": "Slovakian Sign Language",
+ "svm": "Slavomolisano",
+ "svs": "Savosavo",
+ "svx": "Skalvian",
+ "sw": "Swahili (macrolanguage)",
+ "swb": "Maore Comorian",
+ "swc": "Congo Swahili",
+ "swf": "Sere",
+ "swg": "Swabian",
+ "swh": "Swahili (individual language); Kiswahili",
+ "swi": "Sui",
+ "swj": "Sira",
+ "swk": "Malawi Sena",
+ "swl": "Swedish Sign Language",
+ "swm": "Samosa",
+ "swn": "Sawknah",
+ "swo": "Shanenawa",
+ "swp": "Suau",
+ "swq": "Sharwa",
+ "swr": "Saweru",
+ "sws": "Seluwasan",
+ "swt": "Sawila",
+ "swu": "Suwawa",
+ "swv": "Shekhawati",
+ "sww": "Sowa",
+ "swx": "Suruahá",
+ "swy": "Sarua",
+ "sxb": "Suba",
+ "sxc": "Sicanian",
+ "sxe": "Sighu",
+ "sxg": "Shuhi; Shixing",
+ "sxk": "Southern Kalapuya",
+ "sxl": "Selian",
+ "sxm": "Samre",
+ "sxn": "Sangir",
+ "sxo": "Sorothaptic",
+ "sxr": "Saaroa",
+ "sxs": "Sasaru",
+ "sxu": "Upper Saxon",
+ "sxw": "Saxwe Gbe",
+ "sya": "Siang",
+ "syb": "Central Subanen",
+ "syc": "Classical Syriac",
+ "syd": "Samoyedic languages",
+ "syi": "Seki",
+ "syk": "Sukur",
+ "syl": "Sylheti",
+ "sym": "Maya Samo",
+ "syn": "Senaya",
+ "syo": "Suoy",
+ "syr": "Syriac",
+ "sys": "Sinyar",
+ "syw": "Kagate",
+ "syx": "Samay",
+ "syy": "Al-Sayyid Bedouin Sign Language",
+ "sza": "Semelai",
+ "szb": "Ngalum",
+ "szc": "Semaq Beri",
+ "szd": "Seru",
+ "sze": "Seze",
+ "szg": "Sengele",
+ "szl": "Silesian",
+ "szn": "Sula",
+ "szp": "Suabo",
+ "szs": "Solomon Islands Sign Language",
+ "szv": "Isu (Fako Division)",
+ "szw": "Sawai",
+ "szy": "Sakizaya",
+ "ta": "Tamil",
+ "taa": "Lower Tanana",
+ "tab": "Tabassaran",
+ "tac": "Lowland Tarahumara",
+ "tad": "Tause",
+ "tae": "Tariana",
+ "taf": "Tapirapé",
+ "tag": "Tagoi",
+ "tai": "Tai languages",
+ "taj": "Eastern Tamang",
+ "tak": "Tala",
+ "tal": "Tal",
+ "tan": "Tangale",
+ "tao": "Yami",
+ "tap": "Taabwa",
+ "taq": "Tamasheq",
+ "tar": "Central Tarahumara",
+ "tas": "Tay Boi",
+ "tau": "Upper Tanana",
+ "tav": "Tatuyo",
+ "taw": "Tai",
+ "tax": "Tamki",
+ "tay": "Atayal",
+ "taz": "Tocho",
+ "tba": "Aikanã",
+ "tbc": "Takia",
+ "tbd": "Kaki Ae",
+ "tbe": "Tanimbili",
+ "tbf": "Mandara",
+ "tbg": "North Tairora",
+ "tbh": "Dharawal; Thurawal",
+ "tbi": "Gaam",
+ "tbj": "Tiang",
+ "tbk": "Calamian Tagbanwa",
+ "tbl": "Tboli",
+ "tbm": "Tagbu",
+ "tbn": "Barro Negro Tunebo",
+ "tbo": "Tawala",
+ "tbp": "Taworta; Diebroud",
+ "tbq": "Tibeto-Burman languages",
+ "tbr": "Tumtum",
+ "tbs": "Tanguat",
+ "tbt": "Tembo (Kitembo)",
+ "tbu": "Tubar",
+ "tbv": "Tobo",
+ "tbw": "Tagbanwa",
+ "tbx": "Kapin",
+ "tby": "Tabaru",
+ "tbz": "Ditammari",
+ "tca": "Ticuna",
+ "tcb": "Tanacross",
+ "tcc": "Datooga",
+ "tcd": "Tafi",
+ "tce": "Southern Tutchone",
+ "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec",
+ "tcg": "Tamagario",
+ "tch": "Turks And Caicos Creole English",
+ "tci": "Wára",
+ "tck": "Tchitchege",
+ "tcl": "Taman (Myanmar)",
+ "tcm": "Tanahmerah",
+ "tcn": "Tichurong",
+ "tco": "Taungyo",
+ "tcp": "Tawr Chin",
+ "tcq": "Kaiy",
+ "tcs": "Torres Strait Creole; Yumplatok",
+ "tct": "T'en",
+ "tcu": "Southeastern Tarahumara",
+ "tcw": "Tecpatlán Totonac",
+ "tcx": "Toda",
+ "tcy": "Tulu",
+ "tcz": "Thado Chin",
+ "tda": "Tagdal",
+ "tdb": "Panchpargania",
+ "tdc": "Emberá-Tadó",
+ "tdd": "Tai Nüa",
+ "tde": "Tiranige Diga Dogon",
+ "tdf": "Talieng",
+ "tdg": "Western Tamang",
+ "tdh": "Thulung",
+ "tdi": "Tomadino",
+ "tdj": "Tajio",
+ "tdk": "Tambas",
+ "tdl": "Sur",
+ "tdm": "Taruma",
+ "tdn": "Tondano",
+ "tdo": "Teme",
+ "tdq": "Tita",
+ "tdr": "Todrah",
+ "tds": "Doutai",
+ "tdt": "Tetun Dili",
+ "tdv": "Toro",
+ "tdx": "Tandroy-Mahafaly Malagasy",
+ "tdy": "Tadyawan",
+ "te": "Telugu",
+ "tea": "Temiar",
+ "teb": "Tetete",
+ "tec": "Terik",
+ "ted": "Tepo Krumen",
+ "tee": "Huehuetla Tepehua",
+ "tef": "Teressa",
+ "teg": "Teke-Tege",
+ "teh": "Tehuelche",
+ "tei": "Torricelli",
+ "tek": "Ibali Teke",
+ "tem": "Timne",
+ "ten": "Tama (Colombia)",
+ "teo": "Teso",
+ "tep": "Tepecano",
+ "teq": "Temein",
+ "ter": "Tereno",
+ "tes": "Tengger",
+ "tet": "Tetum",
+ "teu": "Soo",
+ "tev": "Teor",
+ "tew": "Tewa (USA)",
+ "tex": "Tennet",
+ "tey": "Tulishi",
+ "tez": "Tetserret",
+ "tfi": "Tofin Gbe",
+ "tfn": "Tanaina",
+ "tfo": "Tefaro",
+ "tfr": "Teribe",
+ "tft": "Ternate",
+ "tg": "Tajik",
+ "tga": "Sagalla",
+ "tgb": "Tobilung",
+ "tgc": "Tigak",
+ "tgd": "Ciwogai",
+ "tge": "Eastern Gorkha Tamang",
+ "tgf": "Chalikha",
+ "tgh": "Tobagonian Creole English",
+ "tgi": "Lawunuia",
+ "tgj": "Tagin",
+ "tgn": "Tandaganon",
+ "tgo": "Sudest",
+ "tgp": "Tangoa",
+ "tgq": "Tring",
+ "tgr": "Tareng",
+ "tgs": "Nume",
+ "tgt": "Central Tagbanwa",
+ "tgu": "Tanggu",
+ "tgv": "Tingui-Boto",
+ "tgw": "Tagwana Senoufo",
+ "tgx": "Tagish",
+ "tgy": "Togoyo",
+ "tgz": "Tagalaka",
+ "th": "Thai",
+ "thd": "Kuuk Thaayorre; Thayore",
+ "the": "Chitwania Tharu",
+ "thf": "Thangmi",
+ "thh": "Northern Tarahumara",
+ "thi": "Tai Long",
+ "thk": "Tharaka; Kitharaka",
+ "thl": "Dangaura Tharu",
+ "thm": "Aheu",
+ "thn": "Thachanadan",
+ "thp": "Thompson",
+ "thq": "Kochila Tharu",
+ "thr": "Rana Tharu",
+ "ths": "Thakali",
+ "tht": "Tahltan",
+ "thu": "Thuri",
+ "thv": "Tahaggart Tamahaq",
+ "thy": "Tha",
+ "thz": "Tayart Tamajeq",
+ "ti": "Tigrinya",
+ "tia": "Tidikelt Tamazight",
+ "tic": "Tira",
+ "tif": "Tifal",
+ "tig": "Tigre",
+ "tih": "Timugon Murut",
+ "tii": "Tiene",
+ "tij": "Tilung",
+ "tik": "Tikar",
+ "til": "Tillamook",
+ "tim": "Timbe",
+ "tin": "Tindi",
+ "tio": "Teop",
+ "tip": "Trimuris",
+ "tiq": "Tiéfo",
+ "tis": "Masadiit Itneg",
+ "tit": "Tinigua",
+ "tiu": "Adasen",
+ "tiv": "Tiv",
+ "tiw": "Tiwi",
+ "tix": "Southern Tiwa",
+ "tiy": "Tiruray",
+ "tiz": "Tai Hongjin",
+ "tja": "Tajuasohn",
+ "tjg": "Tunjung",
+ "tji": "Northern Tujia",
+ "tjj": "Tjungundji",
+ "tjl": "Tai Laing",
+ "tjm": "Timucua",
+ "tjn": "Tonjon",
+ "tjo": "Temacine Tamazight",
+ "tjp": "Tjupany",
+ "tjs": "Southern Tujia",
+ "tju": "Tjurruru",
+ "tjw": "Djabwurrung",
+ "tk": "Turkmen",
+ "tka": "Truká",
+ "tkb": "Buksa",
+ "tkd": "Tukudede",
+ "tke": "Takwane",
+ "tkf": "Tukumanféd",
+ "tkg": "Tesaka Malagasy",
+ "tkl": "Tokelau",
+ "tkm": "Takelma",
+ "tkn": "Toku-No-Shima",
+ "tkp": "Tikopia",
+ "tkq": "Tee",
+ "tkr": "Tsakhur",
+ "tks": "Takestani",
+ "tkt": "Kathoriya Tharu",
+ "tku": "Upper Necaxa Totonac",
+ "tkv": "Mur Pano",
+ "tkw": "Teanu",
+ "tkx": "Tangko",
+ "tkz": "Takua",
+ "tl": "Tagalog",
+ "tla": "Southwestern Tepehuan",
+ "tlb": "Tobelo",
+ "tlc": "Yecuatla Totonac",
+ "tld": "Talaud",
+ "tlf": "Telefol",
+ "tlg": "Tofanma",
+ "tlh": "Klingon; tlhIngan Hol",
+ "tli": "Tlingit",
+ "tlj": "Talinga-Bwisi",
+ "tlk": "Taloki",
+ "tll": "Tetela",
+ "tlm": "Tolomako",
+ "tln": "Talondo'",
+ "tlo": "Talodi",
+ "tlp": "Filomena Mata-Coahuitlán Totonac",
+ "tlq": "Tai Loi",
+ "tlr": "Talise",
+ "tls": "Tambotalo",
+ "tlt": "Sou Nama; Teluti",
+ "tlu": "Tulehu",
+ "tlv": "Taliabu",
+ "tlx": "Khehek",
+ "tly": "Talysh",
+ "tma": "Tama (Chad)",
+ "tmb": "Katbol; Avava",
+ "tmc": "Tumak",
+ "tmd": "Haruai",
+ "tme": "Tremembé",
+ "tmf": "Toba-Maskoy",
+ "tmg": "Ternateño",
+ "tmh": "Tamashek",
+ "tmi": "Tutuba",
+ "tmj": "Samarokena",
+ "tmk": "Northwestern Tamang",
+ "tml": "Tamnim Citak",
+ "tmm": "Tai Thanh",
+ "tmn": "Taman (Indonesia)",
+ "tmo": "Temoq",
+ "tmq": "Tumleo",
+ "tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)",
+ "tms": "Tima",
+ "tmt": "Tasmate",
+ "tmu": "Iau",
+ "tmv": "Tembo (Motembo)",
+ "tmw": "Temuan",
+ "tmy": "Tami",
+ "tmz": "Tamanaku",
+ "tn": "Tswana",
+ "tna": "Tacana",
+ "tnb": "Western Tunebo",
+ "tnc": "Tanimuca-Retuarã",
+ "tnd": "Angosturas Tunebo",
+ "tng": "Tobanga",
+ "tnh": "Maiani",
+ "tni": "Tandia",
+ "tnk": "Kwamera",
+ "tnl": "Lenakel",
+ "tnm": "Tabla",
+ "tnn": "North Tanna",
+ "tno": "Toromono",
+ "tnp": "Whitesands",
+ "tnq": "Taino",
+ "tnr": "Ménik",
+ "tns": "Tenis",
+ "tnt": "Tontemboan",
+ "tnu": "Tay Khang",
+ "tnv": "Tangchangya",
+ "tnw": "Tonsawang",
+ "tnx": "Tanema",
+ "tny": "Tongwe",
+ "tnz": "Ten'edn",
+ "to": "Tonga (Tonga Islands)",
+ "tob": "Toba",
+ "toc": "Coyutla Totonac",
+ "tod": "Toma",
+ "tof": "Gizrra",
+ "tog": "Tonga (Nyasa)",
+ "toh": "Gitonga",
+ "toi": "Tonga (Zambia)",
+ "toj": "Tojolabal",
+ "tok": "Toki Pona",
+ "tol": "Tolowa",
+ "tom": "Tombulu",
+ "too": "Xicotepec De Juárez Totonac",
+ "top": "Papantla Totonac",
+ "toq": "Toposa",
+ "tor": "Togbo-Vara Banda",
+ "tos": "Highland Totonac",
+ "tou": "Tho",
+ "tov": "Upper Taromi",
+ "tow": "Jemez",
+ "tox": "Tobian",
+ "toy": "Topoiyo",
+ "toz": "To",
+ "tpa": "Taupota",
+ "tpc": "Azoyú Me'phaa; Azoyú Tlapanec",
+ "tpe": "Tippera",
+ "tpf": "Tarpia",
+ "tpg": "Kula",
+ "tpi": "Tok Pisin",
+ "tpj": "Tapieté",
+ "tpk": "Tupinikin",
+ "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec",
+ "tpm": "Tampulma",
+ "tpn": "Tupinambá",
+ "tpo": "Tai Pao",
+ "tpp": "Pisaflores Tepehua",
+ "tpq": "Tukpa",
+ "tpr": "Tuparí",
+ "tpt": "Tlachichilco Tepehua",
+ "tpu": "Tampuan",
+ "tpv": "Tanapag",
+ "tpw": "Tupí",
+ "tpx": "Acatepec Me'phaa; Acatepec Tlapanec",
+ "tpy": "Trumai",
+ "tpz": "Tinputz",
+ "tqb": "Tembé",
+ "tql": "Lehali",
+ "tqm": "Turumsa",
+ "tqn": "Tenino",
+ "tqo": "Toaripi",
+ "tqp": "Tomoip",
+ "tqq": "Tunni",
+ "tqr": "Torona",
+ "tqt": "Western Totonac",
+ "tqu": "Touo",
+ "tqw": "Tonkawa",
+ "tr": "Turkish",
+ "tra": "Tirahi",
+ "trb": "Terebu",
+ "trc": "Copala Triqui",
+ "trd": "Turi",
+ "tre": "East Tarangan",
+ "trf": "Trinidadian Creole English",
+ "trg": "Lishán Didán",
+ "trh": "Turaka",
+ "tri": "Trió",
+ "trj": "Toram",
+ "trk": "Turkic languages",
+ "trl": "Traveller Scottish",
+ "trm": "Tregami",
+ "trn": "Trinitario",
+ "tro": "Tarao Naga",
+ "trp": "Kok Borok",
+ "trq": "San Martín Itunyoso Triqui",
+ "trr": "Taushiro",
+ "trs": "Chicahuaxtla Triqui",
+ "trt": "Tunggare",
+ "tru": "Turoyo; Surayt",
+ "trv": "Sediq; Seediq; Taroko",
+ "trw": "Torwali",
+ "trx": "Tringgus-Sembaan Bidayuh",
+ "try": "Turung",
+ "trz": "Torá",
+ "ts": "Tsonga",
+ "tsa": "Tsaangi",
+ "tsb": "Tsamai",
+ "tsc": "Tswa",
+ "tsd": "Tsakonian",
+ "tse": "Tunisian Sign Language",
+ "tsg": "Tausug",
+ "tsh": "Tsuvan",
+ "tsi": "Tsimshian",
+ "tsj": "Tshangla",
+ "tsk": "Tseku",
+ "tsl": "Ts'ün-Lao",
+ "tsm": "Turkish Sign Language; Türk İşaret Dili",
+ "tsp": "Northern Toussian",
+ "tsq": "Thai Sign Language",
+ "tsr": "Akei",
+ "tss": "Taiwan Sign Language",
+ "tst": "Tondi Songway Kiini",
+ "tsu": "Tsou",
+ "tsv": "Tsogo",
+ "tsw": "Tsishingini",
+ "tsx": "Mubami",
+ "tsy": "Tebul Sign Language",
+ "tsz": "Purepecha",
+ "tt": "Tatar",
+ "tta": "Tutelo",
+ "ttb": "Gaa",
+ "ttc": "Tektiteko",
+ "ttd": "Tauade",
+ "tte": "Bwanabwana",
+ "ttf": "Tuotomb",
+ "ttg": "Tutong",
+ "tth": "Upper Ta'oih",
+ "tti": "Tobati",
+ "ttj": "Tooro",
+ "ttk": "Totoro",
+ "ttl": "Totela",
+ "ttm": "Northern Tutchone",
+ "ttn": "Towei",
+ "tto": "Lower Ta'oih",
+ "ttp": "Tombelala",
+ "ttq": "Tawallammat Tamajaq",
+ "ttr": "Tera",
+ "tts": "Northeastern Thai",
+ "ttt": "Muslim Tat",
+ "ttu": "Torau",
+ "ttv": "Titan",
+ "ttw": "Long Wat",
+ "tty": "Sikaritai",
+ "ttz": "Tsum",
+ "tua": "Wiarumus",
+ "tub": "Tübatulabal",
+ "tuc": "Mutu",
+ "tud": "Tuxá",
+ "tue": "Tuyuca",
+ "tuf": "Central Tunebo",
+ "tug": "Tunia",
+ "tuh": "Taulil",
+ "tui": "Tupuri",
+ "tuj": "Tugutil",
+ "tul": "Tula",
+ "tum": "Tumbuka",
+ "tun": "Tunica",
+ "tuo": "Tucano",
+ "tup": "Tupi languages",
+ "tuq": "Tedaga",
+ "tus": "Tuscarora",
+ "tut": "Altaic languages",
+ "tuu": "Tututni",
+ "tuv": "Turkana",
+ "tuw": "Tungus languages",
+ "tux": "Tuxináwa",
+ "tuy": "Tugen",
+ "tuz": "Turka",
+ "tva": "Vaghua",
+ "tvd": "Tsuvadi",
+ "tve": "Te'un",
+ "tvk": "Southeast Ambrym",
+ "tvl": "Tuvalu",
+ "tvm": "Tela-Masbuar",
+ "tvn": "Tavoyan",
+ "tvo": "Tidore",
+ "tvs": "Taveta",
+ "tvt": "Tutsa Naga",
+ "tvu": "Tunen",
+ "tvw": "Sedoa",
+ "tvx": "Taivoan",
+ "tvy": "Timor Pidgin",
+ "tw": "Twi",
+ "twa": "Twana",
+ "twb": "Western Tawbuid",
+ "twc": "Teshenawa",
+ "twd": "Twents",
+ "twe": "Tewa (Indonesia)",
+ "twf": "Northern Tiwa",
+ "twg": "Tereweng",
+ "twh": "Tai Dón",
+ "twl": "Tawara",
+ "twm": "Tawang Monpa",
+ "twn": "Twendi",
+ "two": "Tswapong",
+ "twp": "Ere",
+ "twq": "Tasawaq",
+ "twr": "Southwestern Tarahumara",
+ "twt": "Turiwára",
+ "twu": "Termanu",
+ "tww": "Tuwari",
+ "twx": "Tewe",
+ "twy": "Tawoyan",
+ "txa": "Tombonuo",
+ "txb": "Tokharian B",
+ "txc": "Tsetsaut",
+ "txe": "Totoli",
+ "txg": "Tangut",
+ "txh": "Thracian",
+ "txi": "Ikpeng",
+ "txj": "Tarjumo",
+ "txm": "Tomini",
+ "txn": "West Tarangan",
+ "txo": "Toto",
+ "txq": "Tii",
+ "txr": "Tartessian",
+ "txs": "Tonsea",
+ "txt": "Citak",
+ "txu": "Kayapó",
+ "txx": "Tatana",
+ "txy": "Tanosy Malagasy",
+ "ty": "Tahitian",
+ "tya": "Tauya",
+ "tye": "Kyanga",
+ "tyh": "O'du",
+ "tyi": "Teke-Tsaayi",
+ "tyj": "Tai Do; Tai Yo",
+ "tyl": "Thu Lao",
+ "tyn": "Kombai",
+ "typ": "Thaypan",
+ "tyr": "Tai Daeng",
+ "tys": "Tày Sa Pa",
+ "tyt": "Tày Tac",
+ "tyu": "Kua",
+ "tyv": "Tuvinian",
+ "tyx": "Teke-Tyee",
+ "tyy": "Tiyaa",
+ "tyz": "Tày",
+ "tza": "Tanzanian Sign Language",
+ "tzh": "Tzeltal",
+ "tzj": "Tz'utujil",
+ "tzl": "Talossan",
+ "tzm": "Central Atlas Tamazight",
+ "tzn": "Tugun",
+ "tzo": "Tzotzil",
+ "tzx": "Tabriak",
+ "uam": "Uamué",
+ "uan": "Kuan",
+ "uar": "Tairuma",
+ "uba": "Ubang",
+ "ubi": "Ubi",
+ "ubl": "Buhi'non Bikol",
+ "ubr": "Ubir",
+ "ubu": "Umbu-Ungu",
+ "uby": "Ubykh",
+ "uda": "Uda",
+ "ude": "Udihe",
+ "udg": "Muduga",
+ "udi": "Udi",
+ "udj": "Ujir",
+ "udl": "Wuzlam",
+ "udm": "Udmurt",
+ "udu": "Uduk",
+ "ues": "Kioko",
+ "ufi": "Ufim",
+ "ug": "Uighur; Uyghur",
+ "uga": "Ugaritic",
+ "ugb": "Kuku-Ugbanh",
+ "uge": "Ughele",
+ "ugh": "Kubachi",
+ "ugn": "Ugandan Sign Language",
+ "ugo": "Ugong",
+ "ugy": "Uruguayan Sign Language",
+ "uha": "Uhami",
+ "uhn": "Damal",
+ "uis": "Uisai",
+ "uiv": "Iyive",
+ "uji": "Tanjijili",
+ "uk": "Ukrainian",
+ "uka": "Kaburi",
+ "ukg": "Ukuriguma",
+ "ukh": "Ukhwejo",
+ "uki": "Kui (India)",
+ "ukk": "Muak Sa-aak",
+ "ukl": "Ukrainian Sign Language",
+ "ukp": "Ukpe-Bayobiri",
+ "ukq": "Ukwa",
+ "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language",
+ "uku": "Ukue",
+ "ukv": "Kuku",
+ "ukw": "Ukwuani-Aboh-Ndoni",
+ "uky": "Kuuk-Yak",
+ "ula": "Fungwa",
+ "ulb": "Ulukwumi",
+ "ulc": "Ulch",
+ "ule": "Lule",
+ "ulf": "Usku; Afra",
+ "uli": "Ulithian",
+ "ulk": "Meriam Mir",
+ "ull": "Ullatan",
+ "ulm": "Ulumanda'",
+ "uln": "Unserdeutsch",
+ "ulu": "Uma' Lung",
+ "ulw": "Ulwa",
+ "uma": "Umatilla",
+ "umb": "Umbundu",
+ "umc": "Marrucinian",
+ "umd": "Umbindhamu",
+ "umg": "Morrobalama; Umbuygamu",
+ "umi": "Ukit",
+ "umm": "Umon",
+ "umn": "Makyan Naga",
+ "umo": "Umotína",
+ "ump": "Umpila",
+ "umr": "Umbugarla",
+ "ums": "Pendau",
+ "umu": "Munsee",
+ "una": "North Watut",
+ "und": "Undetermined",
+ "une": "Uneme",
+ "ung": "Ngarinyin",
+ "uni": "Uni",
+ "unk": "Enawené-Nawé",
+ "unm": "Unami",
+ "unn": "Kurnai",
+ "unr": "Mundari",
+ "unu": "Unubahe",
+ "unx": "Munda",
+ "unz": "Unde Kaili",
+ "uon": "Kulon",
+ "upi": "Umeda",
+ "upv": "Uripiv-Wala-Rano-Atchin",
+ "ur": "Urdu",
+ "ura": "Urarina",
+ "urb": "Urubú-Kaapor; Kaapor",
+ "urc": "Urningangg",
+ "ure": "Uru",
+ "urf": "Uradhi",
+ "urg": "Urigina",
+ "urh": "Urhobo",
+ "uri": "Urim",
+ "urj": "Uralic languages",
+ "urk": "Urak Lawoi'",
+ "url": "Urali",
+ "urm": "Urapmin",
+ "urn": "Uruangnirin",
+ "uro": "Ura (Papua New Guinea)",
+ "urp": "Uru-Pa-In",
+ "urr": "Lehalurup; Löyöp",
+ "urt": "Urat",
+ "uru": "Urumi",
+ "urv": "Uruava",
+ "urw": "Sop",
+ "urx": "Urimo",
+ "ury": "Orya",
+ "urz": "Uru-Eu-Wau-Wau",
+ "usa": "Usarufa",
+ "ush": "Ushojo",
+ "usi": "Usui",
+ "usk": "Usaghade",
+ "usp": "Uspanteco",
+ "uss": "us-Saare",
+ "usu": "Uya",
+ "uta": "Otank",
+ "ute": "Ute-Southern Paiute",
+ "uth": "ut-Hun",
+ "utp": "Amba (Solomon Islands)",
+ "utr": "Etulo",
+ "utu": "Utu",
+ "uum": "Urum",
+ "uur": "Ura (Vanuatu)",
+ "uuu": "U",
+ "uve": "West Uvean; Fagauvea",
+ "uvh": "Uri",
+ "uvl": "Lote",
+ "uwa": "Kuku-Uwanh",
+ "uya": "Doko-Uyanga",
+ "uz": "Uzbek",
+ "uzn": "Northern Uzbek",
+ "uzs": "Southern Uzbek",
+ "vaa": "Vaagri Booli",
+ "vae": "Vale",
+ "vaf": "Vafsi",
+ "vag": "Vagla",
+ "vah": "Varhadi-Nagpuri",
+ "vai": "Vai",
+ "vaj": "Sekele; Northwestern ǃKung; Vasekele",
+ "val": "Vehes",
+ "vam": "Vanimo",
+ "van": "Valman",
+ "vao": "Vao",
+ "vap": "Vaiphei",
+ "var": "Huarijio",
+ "vas": "Vasavi",
+ "vau": "Vanuma",
+ "vav": "Varli",
+ "vay": "Wayu",
+ "vbb": "Southeast Babar",
+ "vbk": "Southwestern Bontok",
+ "ve": "Venda",
+ "vec": "Venetian",
+ "ved": "Veddah",
+ "vel": "Veluws",
+ "vem": "Vemgo-Mabas",
+ "veo": "Ventureño",
+ "vep": "Veps",
+ "ver": "Mom Jango",
+ "vgr": "Vaghri",
+ "vgt": "Vlaamse Gebarentaal; Flemish Sign Language",
+ "vi": "Vietnamese",
+ "vic": "Virgin Islands Creole English",
+ "vid": "Vidunda",
+ "vif": "Vili",
+ "vig": "Viemo",
+ "vil": "Vilela",
+ "vin": "Vinza",
+ "vis": "Vishavan",
+ "vit": "Viti",
+ "viv": "Iduna",
+ "vka": "Kariyarra",
+ "vkj": "Kujarge",
+ "vkk": "Kaur",
+ "vkl": "Kulisusu",
+ "vkm": "Kamakan",
+ "vkn": "Koro Nulu",
+ "vko": "Kodeoha",
+ "vkp": "Korlai Creole Portuguese",
+ "vkt": "Tenggarong Kutai Malay",
+ "vku": "Kurrama",
+ "vkz": "Koro Zuba",
+ "vlp": "Valpei",
+ "vls": "Vlaams",
+ "vma": "Martuyhunira",
+ "vmb": "Barbaram",
+ "vmc": "Juxtlahuaca Mixtec",
+ "vmd": "Mudu Koraga",
+ "vme": "East Masela",
+ "vmf": "Mainfränkisch",
+ "vmg": "Lungalunga",
+ "vmh": "Maraghei",
+ "vmi": "Miwa",
+ "vmj": "Ixtayutla Mixtec",
+ "vmk": "Makhuwa-Shirima",
+ "vml": "Malgana",
+ "vmm": "Mitlatongo Mixtec",
+ "vmp": "Soyaltepec Mazatec",
+ "vmq": "Soyaltepec Mixtec",
+ "vmr": "Marenje",
+ "vms": "Moksela",
+ "vmu": "Muluridyi",
+ "vmv": "Valley Maidu",
+ "vmw": "Makhuwa",
+ "vmx": "Tamazola Mixtec",
+ "vmy": "Ayautla Mazatec",
+ "vmz": "Mazatlán Mazatec",
+ "vnk": "Vano; Lovono",
+ "vnm": "Vinmavis; Neve'ei",
+ "vnp": "Vunapu",
+ "vo": "Volapük",
+ "vor": "Voro",
+ "vot": "Votic",
+ "vra": "Vera'a",
+ "vro": "Võro",
+ "vrs": "Varisi",
+ "vrt": "Burmbar; Banam Bay",
+ "vsi": "Moldova Sign Language",
+ "vsl": "Venezuelan Sign Language",
+ "vsv": "Valencian Sign Language; Llengua de signes valenciana",
+ "vto": "Vitou",
+ "vum": "Vumbu",
+ "vun": "Vunjo",
+ "vut": "Vute",
+ "vwa": "Awa (China)",
+ "wa": "Walloon",
+ "waa": "Walla Walla",
+ "wab": "Wab",
+ "wac": "Wasco-Wishram",
+ "wad": "Wamesa; Wondama",
+ "wae": "Walser",
+ "waf": "Wakoná",
+ "wag": "Wa'ema",
+ "wah": "Watubela",
+ "wai": "Wares",
+ "waj": "Waffa",
+ "wak": "Wakashan languages",
+ "wal": "Wolaytta; Wolaitta",
+ "wam": "Wampanoag",
+ "wan": "Wan",
+ "wao": "Wappo",
+ "wap": "Wapishana",
+ "waq": "Wagiman",
+ "war": "Waray (Philippines)",
+ "was": "Washo",
+ "wat": "Kaninuwa",
+ "wau": "Waurá",
+ "wav": "Waka",
+ "waw": "Waiwai",
+ "wax": "Watam; Marangis",
+ "way": "Wayana",
+ "waz": "Wampur",
+ "wba": "Warao",
+ "wbb": "Wabo",
+ "wbe": "Waritai",
+ "wbf": "Wara",
+ "wbh": "Wanda",
+ "wbi": "Vwanji",
+ "wbj": "Alagwa",
+ "wbk": "Waigali",
+ "wbl": "Wakhi",
+ "wbm": "Wa",
+ "wbp": "Warlpiri",
+ "wbq": "Waddar",
+ "wbr": "Wagdi",
+ "wbs": "West Bengal Sign Language",
+ "wbt": "Warnman",
+ "wbv": "Wajarri",
+ "wbw": "Woi",
+ "wca": "Yanomámi",
+ "wci": "Waci Gbe",
+ "wdd": "Wandji",
+ "wdg": "Wadaginam",
+ "wdj": "Wadjiginy",
+ "wdk": "Wadikali",
+ "wdt": "Wendat",
+ "wdu": "Wadjigu",
+ "wdy": "Wadjabangayi",
+ "wea": "Wewaw",
+ "wec": "Wè Western",
+ "wed": "Wedau",
+ "weg": "Wergaia",
+ "weh": "Weh",
+ "wei": "Kiunum",
+ "wem": "Weme Gbe",
+ "wen": "Sorbian languages",
+ "weo": "Wemale",
+ "wep": "Westphalien",
+ "wer": "Weri",
+ "wes": "Cameroon Pidgin",
+ "wet": "Perai",
+ "weu": "Rawngtu Chin",
+ "wew": "Wejewa",
+ "wfg": "Yafi; Zorop",
+ "wga": "Wagaya",
+ "wgb": "Wagawaga",
+ "wgg": "Wangkangurru; Wangganguru",
+ "wgi": "Wahgi",
+ "wgo": "Waigeo",
+ "wgu": "Wirangu",
+ "wgy": "Warrgamay",
+ "wha": "Sou Upaa; Manusela",
+ "whg": "North Wahgi",
+ "whk": "Wahau Kenyah",
+ "whu": "Wahau Kayan",
+ "wib": "Southern Toussian",
+ "wic": "Wichita",
+ "wie": "Wik-Epa",
+ "wif": "Wik-Keyangan",
+ "wig": "Wik Ngathan",
+ "wih": "Wik-Me'anha",
+ "wii": "Minidien",
+ "wij": "Wik-Iiyanh",
+ "wik": "Wikalkan",
+ "wil": "Wilawila",
+ "wim": "Wik-Mungkan",
+ "win": "Ho-Chunk",
+ "wir": "Wiraféd",
+ "wiu": "Wiru",
+ "wiv": "Vitu",
+ "wiy": "Wiyot",
+ "wja": "Waja",
+ "wji": "Warji",
+ "wka": "Kw'adza",
+ "wkb": "Kumbaran",
+ "wkd": "Wakde; Mo",
+ "wkl": "Kalanadi",
+ "wkr": "Keerray-Woorroong",
+ "wku": "Kunduvadi",
+ "wkw": "Wakawaka",
+ "wky": "Wangkayutyuru",
+ "wla": "Walio",
+ "wlc": "Mwali Comorian",
+ "wle": "Wolane",
+ "wlg": "Kunbarlang",
+ "wlh": "Welaun",
+ "wli": "Waioli",
+ "wlk": "Wailaki",
+ "wll": "Wali (Sudan)",
+ "wlm": "Middle Welsh",
+ "wlo": "Wolio",
+ "wlr": "Wailapa",
+ "wls": "Wallisian",
+ "wlu": "Wuliwuli",
+ "wlv": "Wichí Lhamtés Vejoz",
+ "wlw": "Walak",
+ "wlx": "Wali (Ghana)",
+ "wly": "Waling",
+ "wma": "Mawa (Nigeria)",
+ "wmb": "Wambaya",
+ "wmc": "Wamas",
+ "wmd": "Mamaindé",
+ "wme": "Wambule",
+ "wmg": "Western Minyag",
+ "wmh": "Waima'a",
+ "wmi": "Wamin",
+ "wmm": "Maiwa (Indonesia)",
+ "wmn": "Waamwang",
+ "wmo": "Wom (Papua New Guinea)",
+ "wms": "Wambon",
+ "wmt": "Walmajarri",
+ "wmw": "Mwani",
+ "wmx": "Womo",
+ "wnb": "Wanambre",
+ "wnc": "Wantoat",
+ "wnd": "Wandarang",
+ "wne": "Waneci",
+ "wng": "Wanggom",
+ "wni": "Ndzwani Comorian",
+ "wnk": "Wanukaka",
+ "wnm": "Wanggamala",
+ "wnn": "Wunumara",
+ "wno": "Wano",
+ "wnp": "Wanap",
+ "wnu": "Usan",
+ "wnw": "Wintu",
+ "wny": "Wanyi; Waanyi",
+ "wo": "Wolof",
+ "woa": "Kuwema; Tyaraity",
+ "wob": "Wè Northern",
+ "woc": "Wogeo",
+ "wod": "Wolani",
+ "woe": "Woleaian",
+ "wof": "Gambian Wolof",
+ "wog": "Wogamusin",
+ "woi": "Kamang",
+ "wok": "Longto",
+ "wom": "Wom (Nigeria)",
+ "won": "Wongo",
+ "woo": "Manombai",
+ "wor": "Woria",
+ "wos": "Hanga Hundi",
+ "wow": "Wawonii",
+ "woy": "Weyto",
+ "wpc": "Maco",
+ "wrb": "Waluwarra; Warluwara",
+ "wrg": "Warungu; Gudjal",
+ "wrh": "Wiradjuri",
+ "wri": "Wariyangga",
+ "wrk": "Garrwa",
+ "wrl": "Warlmanpa",
+ "wrm": "Warumungu",
+ "wrn": "Warnang",
+ "wro": "Worrorra",
+ "wrp": "Waropen",
+ "wrr": "Wardaman",
+ "wrs": "Waris",
+ "wru": "Waru",
+ "wrv": "Waruna",
+ "wrw": "Gugu Warra",
+ "wrx": "Wae Rana",
+ "wry": "Merwari",
+ "wrz": "Waray (Australia)",
+ "wsa": "Warembori",
+ "wsg": "Adilabad Gondi",
+ "wsi": "Wusi",
+ "wsk": "Waskia",
+ "wsr": "Owenia",
+ "wss": "Wasa",
+ "wsu": "Wasu",
+ "wsv": "Wotapuri-Katarqalai",
+ "wtf": "Watiwa",
+ "wth": "Wathawurrung",
+ "wti": "Berta",
+ "wtk": "Watakataui",
+ "wtm": "Mewati",
+ "wtw": "Wotu",
+ "wua": "Wikngenchera",
+ "wub": "Wunambal",
+ "wud": "Wudu",
+ "wuh": "Wutunhua",
+ "wul": "Silimo",
+ "wum": "Wumbvu",
+ "wun": "Bungu",
+ "wur": "Wurrugu",
+ "wut": "Wutung",
+ "wuu": "Wu Chinese",
+ "wuv": "Wuvulu-Aua",
+ "wux": "Wulna",
+ "wuy": "Wauyai",
+ "wwa": "Waama",
+ "wwb": "Wakabunga",
+ "wwo": "Wetamut; Dorig",
+ "wwr": "Warrwa",
+ "www": "Wawa",
+ "wxa": "Waxianghua",
+ "wxw": "Wardandi",
+ "wyb": "Wangaaybuwan-Ngiyambaa",
+ "wyi": "Woiwurrung",
+ "wym": "Wymysorys",
+ "wyn": "Wyandot",
+ "wyr": "Wayoró",
+ "wyy": "Western Fijian",
+ "xaa": "Andalusian Arabic",
+ "xab": "Sambe",
+ "xac": "Kachari",
+ "xad": "Adai",
+ "xae": "Aequian",
+ "xag": "Aghwan",
+ "xai": "Kaimbé",
+ "xaj": "Ararandewára",
+ "xak": "Máku",
+ "xal": "Kalmyk; Oirat",
+ "xam": "ǀXam",
+ "xan": "Xamtanga",
+ "xao": "Khao",
+ "xap": "Apalachee",
+ "xaq": "Aquitanian",
+ "xar": "Karami",
+ "xas": "Kamas",
+ "xat": "Katawixi",
+ "xau": "Kauwera",
+ "xav": "Xavánte",
+ "xaw": "Kawaiisu",
+ "xay": "Kayan Mahakam",
+ "xbb": "Lower Burdekin",
+ "xbc": "Bactrian",
+ "xbd": "Bindal",
+ "xbe": "Bigambal",
+ "xbg": "Bunganditj",
+ "xbi": "Kombio",
+ "xbj": "Birrpayi",
+ "xbm": "Middle Breton",
+ "xbn": "Kenaboi",
+ "xbo": "Bolgarian",
+ "xbp": "Bibbulman",
+ "xbr": "Kambera",
+ "xbw": "Kambiwá",
+ "xby": "Batjala; Batyala",
+ "xcb": "Cumbric",
+ "xcc": "Camunic",
+ "xce": "Celtiberian",
+ "xcg": "Cisalpine Gaulish",
+ "xch": "Chemakum; Chimakum",
+ "xcl": "Classical Armenian",
+ "xcm": "Comecrudo",
+ "xcn": "Cotoname",
+ "xco": "Chorasmian",
+ "xcr": "Carian",
+ "xct": "Classical Tibetan",
+ "xcu": "Curonian",
+ "xcv": "Chuvantsy",
+ "xcw": "Coahuilteco",
+ "xcy": "Cayuse",
+ "xda": "Darkinyung",
+ "xdc": "Dacian",
+ "xdk": "Dharuk",
+ "xdm": "Edomite",
+ "xdo": "Kwandu",
+ "xdq": "Kaitag",
+ "xdy": "Malayic Dayak",
+ "xeb": "Eblan",
+ "xed": "Hdi",
+ "xeg": "ǁXegwi",
+ "xel": "Kelo",
+ "xem": "Kembayan",
+ "xep": "Epi-Olmec",
+ "xer": "Xerénte",
+ "xes": "Kesawai",
+ "xet": "Xetá",
+ "xeu": "Keoru-Ahia",
+ "xfa": "Faliscan",
+ "xga": "Galatian",
+ "xgb": "Gbin",
+ "xgd": "Gudang",
+ "xgf": "Gabrielino-Fernandeño",
+ "xgg": "Goreng",
+ "xgi": "Garingbal",
+ "xgl": "Galindan",
+ "xgm": "Dharumbal; Guwinmal",
+ "xgn": "Mongolian languages",
+ "xgr": "Garza",
+ "xgu": "Unggumi",
+ "xgw": "Guwa",
+ "xh": "Xhosa",
+ "xha": "Harami",
+ "xhc": "Hunnic",
+ "xhd": "Hadrami",
+ "xhe": "Khetrani",
+ "xhm": "Middle Khmer (1400 to 1850 CE)",
+ "xhr": "Hernican",
+ "xht": "Hattic",
+ "xhu": "Hurrian",
+ "xhv": "Khua",
+ "xib": "Iberian",
+ "xii": "Xiri",
+ "xil": "Illyrian",
+ "xin": "Xinca",
+ "xir": "Xiriâna",
+ "xis": "Kisan",
+ "xiv": "Indus Valley Language",
+ "xiy": "Xipaya",
+ "xjb": "Minjungbal",
+ "xjt": "Jaitmatang",
+ "xka": "Kalkoti",
+ "xkb": "Northern Nago",
+ "xkc": "Kho'ini",
+ "xkd": "Mendalam Kayan",
+ "xke": "Kereho",
+ "xkf": "Khengkha",
+ "xkg": "Kagoro",
+ "xki": "Kenyan Sign Language",
+ "xkj": "Kajali",
+ "xkk": "Kachok; Kaco'",
+ "xkl": "Mainstream Kenyah",
+ "xkn": "Kayan River Kayan",
+ "xko": "Kiorr",
+ "xkp": "Kabatei",
+ "xkq": "Koroni",
+ "xkr": "Xakriabá",
+ "xks": "Kumbewaha",
+ "xkt": "Kantosi",
+ "xku": "Kaamba",
+ "xkv": "Kgalagadi",
+ "xkw": "Kembra",
+ "xkx": "Karore",
+ "xky": "Uma' Lasan",
+ "xkz": "Kurtokha",
+ "xla": "Kamula",
+ "xlb": "Loup B",
+ "xlc": "Lycian",
+ "xld": "Lydian",
+ "xle": "Lemnian",
+ "xlg": "Ligurian (Ancient)",
+ "xli": "Liburnian",
+ "xln": "Alanic",
+ "xlo": "Loup A",
+ "xlp": "Lepontic",
+ "xls": "Lusitanian",
+ "xlu": "Cuneiform Luwian",
+ "xly": "Elymian",
+ "xma": "Mushungulu",
+ "xmb": "Mbonga",
+ "xmc": "Makhuwa-Marrevone",
+ "xmd": "Mbudum",
+ "xme": "Median",
+ "xmf": "Mingrelian",
+ "xmg": "Mengaka",
+ "xmh": "Kugu-Muminh",
+ "xmj": "Majera",
+ "xmk": "Ancient Macedonian",
+ "xml": "Malaysian Sign Language",
+ "xmm": "Manado Malay",
+ "xmn": "Manichaean Middle Persian",
+ "xmo": "Morerebi",
+ "xmp": "Kuku-Mu'inh",
+ "xmq": "Kuku-Mangk",
+ "xmr": "Meroitic",
+ "xms": "Moroccan Sign Language",
+ "xmt": "Matbat",
+ "xmu": "Kamu",
+ "xmv": "Antankarana Malagasy; Tankarana Malagasy",
+ "xmw": "Tsimihety Malagasy",
+ "xmx": "Salawati; Maden",
+ "xmy": "Mayaguduna",
+ "xmz": "Mori Bawah",
+ "xna": "Ancient North Arabian",
+ "xnb": "Kanakanabu",
+ "xnd": "Na-Dene languages",
+ "xng": "Middle Mongolian",
+ "xnh": "Kuanhua",
+ "xni": "Ngarigu",
+ "xnj": "Ngoni (Tanzania)",
+ "xnk": "Nganakarti",
+ "xnm": "Ngumbarl",
+ "xnn": "Northern Kankanay",
+ "xno": "Anglo-Norman",
+ "xnq": "Ngoni (Mozambique)",
+ "xnr": "Kangri",
+ "xns": "Kanashi",
+ "xnt": "Narragansett",
+ "xnu": "Nukunul",
+ "xny": "Nyiyaparli",
+ "xnz": "Kenzi; Mattoki",
+ "xoc": "O'chi'chi'",
+ "xod": "Kokoda",
+ "xog": "Soga",
+ "xoi": "Kominimung",
+ "xok": "Xokleng",
+ "xom": "Komo (Sudan)",
+ "xon": "Konkomba",
+ "xoo": "Xukurú",
+ "xop": "Kopar",
+ "xor": "Korubo",
+ "xow": "Kowaki",
+ "xpa": "Pirriya",
+ "xpb": "Northeastern Tasmanian; Pyemmairrener",
+ "xpc": "Pecheneg",
+ "xpd": "Oyster Bay Tasmanian",
+ "xpe": "Liberia Kpelle",
+ "xpf": "Southeast Tasmanian; Nuenonne",
+ "xpg": "Phrygian",
+ "xph": "North Midlands Tasmanian; Tyerrenoterpanner",
+ "xpi": "Pictish",
+ "xpj": "Mpalitjanh",
+ "xpk": "Kulina Pano",
+ "xpl": "Port Sorell Tasmanian",
+ "xpm": "Pumpokol",
+ "xpn": "Kapinawá",
+ "xpo": "Pochutec",
+ "xpp": "Puyo-Paekche",
+ "xpq": "Mohegan-Pequot",
+ "xpr": "Parthian",
+ "xps": "Pisidian",
+ "xpt": "Punthamara",
+ "xpu": "Punic",
+ "xpv": "Northern Tasmanian; Tommeginne",
+ "xpw": "Northwestern Tasmanian; Peerapper",
+ "xpx": "Southwestern Tasmanian; Toogee",
+ "xpy": "Puyo",
+ "xpz": "Bruny Island Tasmanian",
+ "xqa": "Karakhanid",
+ "xqt": "Qatabanian",
+ "xra": "Krahô",
+ "xrb": "Eastern Karaboro",
+ "xrd": "Gundungurra",
+ "xre": "Kreye",
+ "xrg": "Minang",
+ "xri": "Krikati-Timbira",
+ "xrm": "Armazic",
+ "xrn": "Arin",
+ "xrr": "Raetic",
+ "xrt": "Aranama-Tamique",
+ "xru": "Marriammu",
+ "xrw": "Karawa",
+ "xsa": "Sabaean",
+ "xsb": "Sambal",
+ "xsc": "Scythian",
+ "xsd": "Sidetic",
+ "xse": "Sempan",
+ "xsh": "Shamang",
+ "xsi": "Sio",
+ "xsj": "Subi",
+ "xsl": "South Slavey",
+ "xsm": "Kasem",
+ "xsn": "Sanga (Nigeria)",
+ "xso": "Solano",
+ "xsp": "Silopi",
+ "xsq": "Makhuwa-Saka",
+ "xsr": "Sherpa",
+ "xss": "Assan",
+ "xsu": "Sanumá",
+ "xsv": "Sudovian",
+ "xsy": "Saisiyat",
+ "xta": "Alcozauca Mixtec",
+ "xtb": "Chazumba Mixtec",
+ "xtc": "Katcha-Kadugli-Miri",
+ "xtd": "Diuxi-Tilantongo Mixtec",
+ "xte": "Ketengban",
+ "xtg": "Transalpine Gaulish",
+ "xth": "Yitha Yitha",
+ "xti": "Sinicahua Mixtec",
+ "xtj": "San Juan Teita Mixtec",
+ "xtl": "Tijaltepec Mixtec",
+ "xtm": "Magdalena Peñasco Mixtec",
+ "xtn": "Northern Tlaxiaco Mixtec",
+ "xto": "Tokharian A",
+ "xtp": "San Miguel Piedras Mixtec",
+ "xtq": "Tumshuqese",
+ "xtr": "Early Tripuri",
+ "xts": "Sindihui Mixtec",
+ "xtt": "Tacahua Mixtec",
+ "xtu": "Cuyamecalco Mixtec",
+ "xtv": "Thawa",
+ "xtw": "Tawandê",
+ "xty": "Yoloxochitl Mixtec",
+ "xua": "Alu Kurumba",
+ "xub": "Betta Kurumba",
+ "xud": "Umiida",
+ "xug": "Kunigami",
+ "xuj": "Jennu Kurumba",
+ "xul": "Ngunawal; Nunukul",
+ "xum": "Umbrian",
+ "xun": "Unggaranggu",
+ "xuo": "Kuo",
+ "xup": "Upper Umpqua",
+ "xur": "Urartian",
+ "xut": "Kuthant",
+ "xuu": "Kxoe; Khwedam",
+ "xve": "Venetic",
+ "xvi": "Kamviri",
+ "xvn": "Vandalic",
+ "xvo": "Volscian",
+ "xvs": "Vestinian",
+ "xwa": "Kwaza",
+ "xwc": "Woccon",
+ "xwd": "Wadi Wadi",
+ "xwe": "Xwela Gbe",
+ "xwg": "Kwegu",
+ "xwj": "Wajuk",
+ "xwk": "Wangkumara",
+ "xwl": "Western Xwla Gbe",
+ "xwo": "Written Oirat",
+ "xwr": "Kwerba Mamberamo",
+ "xwt": "Wotjobaluk",
+ "xww": "Wemba Wemba",
+ "xxb": "Boro (Ghana)",
+ "xxk": "Ke'o",
+ "xxm": "Minkin",
+ "xxr": "Koropó",
+ "xxt": "Tambora",
+ "xya": "Yaygir",
+ "xyb": "Yandjibara",
+ "xyj": "Mayi-Yapi",
+ "xyk": "Mayi-Kulan",
+ "xyl": "Yalakalore",
+ "xyt": "Mayi-Thakurti",
+ "xyy": "Yorta Yorta",
+ "xzh": "Zhang-Zhung",
+ "xzm": "Zemgalian",
+ "xzp": "Ancient Zapotec",
+ "yaa": "Yaminahua",
+ "yab": "Yuhup",
+ "yac": "Pass Valley Yali",
+ "yad": "Yagua",
+ "yae": "Pumé",
+ "yaf": "Yaka (Democratic Republic of Congo)",
+ "yag": "Yámana",
+ "yah": "Yazgulyam",
+ "yai": "Yagnobi",
+ "yaj": "Banda-Yangere",
+ "yak": "Yakama",
+ "yal": "Yalunka",
+ "yam": "Yamba",
+ "yan": "Mayangna",
+ "yao": "Yao",
+ "yap": "Yapese",
+ "yaq": "Yaqui",
+ "yar": "Yabarana",
+ "yas": "Nugunu (Cameroon)",
+ "yat": "Yambeta",
+ "yau": "Yuwana",
+ "yav": "Yangben",
+ "yaw": "Yawalapití",
+ "yax": "Yauma",
+ "yay": "Agwagwune",
+ "yaz": "Lokaa",
+ "yba": "Yala",
+ "ybb": "Yemba",
+ "ybe": "West Yugur",
+ "ybh": "Yakha",
+ "ybi": "Yamphu",
+ "ybj": "Hasha",
+ "ybk": "Bokha",
+ "ybl": "Yukuben",
+ "ybm": "Yaben",
+ "ybn": "Yabaâna",
+ "ybo": "Yabong",
+ "ybx": "Yawiyo",
+ "yby": "Yaweyuha",
+ "ych": "Chesu",
+ "ycl": "Lolopo",
+ "ycn": "Yucuna",
+ "ycp": "Chepya",
+ "yda": "Yanda",
+ "ydd": "Eastern Yiddish",
+ "yde": "Yangum Dey",
+ "ydg": "Yidgha",
+ "ydk": "Yoidik",
+ "yea": "Ravula",
+ "yec": "Yeniche",
+ "yee": "Yimas",
+ "yei": "Yeni",
+ "yej": "Yevanic",
+ "yel": "Yela",
+ "yer": "Tarok",
+ "yes": "Nyankpa",
+ "yet": "Yetfa",
+ "yeu": "Yerukula",
+ "yev": "Yapunda",
+ "yey": "Yeyi",
+ "yga": "Malyangapa",
+ "ygi": "Yiningayi",
+ "ygl": "Yangum Gel",
+ "ygm": "Yagomi",
+ "ygp": "Gepo",
+ "ygr": "Yagaria",
+ "ygs": "Yolŋu Sign Language",
+ "ygu": "Yugul",
+ "ygw": "Yagwoia",
+ "yha": "Baha Buyang",
+ "yhd": "Judeo-Iraqi Arabic",
+ "yhl": "Hlepho Phowa",
+ "yhs": "Yan-nhaŋu Sign Language",
+ "yi": "Yiddish",
+ "yia": "Yinggarda",
+ "yif": "Ache",
+ "yig": "Wusa Nasu",
+ "yih": "Western Yiddish",
+ "yii": "Yidiny",
+ "yij": "Yindjibarndi",
+ "yik": "Dongshanba Lalo",
+ "yil": "Yindjilandji",
+ "yim": "Yimchungru Naga",
+ "yin": "Riang Lai; Yinchia",
+ "yip": "Pholo",
+ "yiq": "Miqie",
+ "yir": "North Awyu",
+ "yis": "Yis",
+ "yit": "Eastern Lalu",
+ "yiu": "Awu",
+ "yiv": "Northern Nisu",
+ "yix": "Axi Yi",
+ "yiz": "Azhe",
+ "yka": "Yakan",
+ "ykg": "Northern Yukaghir",
+ "yki": "Yoke",
+ "ykk": "Yakaikeke",
+ "ykl": "Khlula",
+ "ykm": "Kap",
+ "ykn": "Kua-nsi",
+ "yko": "Yasa",
+ "ykr": "Yekora",
+ "ykt": "Kathu",
+ "yku": "Kuamasi",
+ "yky": "Yakoma",
+ "yla": "Yaul",
+ "ylb": "Yaleba",
+ "yle": "Yele",
+ "ylg": "Yelogu",
+ "yli": "Angguruk Yali",
+ "yll": "Yil",
+ "ylm": "Limi",
+ "yln": "Langnian Buyang",
+ "ylo": "Naluo Yi",
+ "ylr": "Yalarnnga",
+ "ylu": "Aribwaung",
+ "yly": "Nyâlayu; Nyelâyu",
+ "ymb": "Yambes",
+ "ymc": "Southern Muji",
+ "ymd": "Muda",
+ "yme": "Yameo",
+ "ymg": "Yamongeri",
+ "ymh": "Mili",
+ "ymi": "Moji",
+ "ymk": "Makwe",
+ "yml": "Iamalele",
+ "ymm": "Maay",
+ "ymn": "Yamna; Sunum",
+ "ymo": "Yangum Mon",
+ "ymp": "Yamap",
+ "ymq": "Qila Muji",
+ "ymr": "Malasar",
+ "yms": "Mysian",
+ "ymx": "Northern Muji",
+ "ymz": "Muzi",
+ "yna": "Aluo",
+ "ynd": "Yandruwandha",
+ "yne": "Lang'e",
+ "yng": "Yango",
+ "ynk": "Naukan Yupik",
+ "ynl": "Yangulam",
+ "ynn": "Yana",
+ "yno": "Yong",
+ "ynq": "Yendang",
+ "yns": "Yansi",
+ "ynu": "Yahuna",
+ "yo": "Yoruba",
+ "yob": "Yoba",
+ "yog": "Yogad",
+ "yoi": "Yonaguni",
+ "yok": "Yokuts",
+ "yol": "Yola",
+ "yom": "Yombe",
+ "yon": "Yongkom",
+ "yot": "Yotti",
+ "yox": "Yoron",
+ "yoy": "Yoy",
+ "ypa": "Phala",
+ "ypb": "Labo Phowa",
+ "ypg": "Phola",
+ "yph": "Phupha",
+ "ypk": "Yupik languages",
+ "ypm": "Phuma",
+ "ypn": "Ani Phowa",
+ "ypo": "Alo Phola",
+ "ypp": "Phupa",
+ "ypz": "Phuza",
+ "yra": "Yerakai",
+ "yrb": "Yareba",
+ "yre": "Yaouré",
+ "yrk": "Nenets",
+ "yrl": "Nhengatu",
+ "yrm": "Yirrk-Mel",
+ "yrn": "Yerong",
+ "yro": "Yaroamë",
+ "yrs": "Yarsun",
+ "yrw": "Yarawata",
+ "yry": "Yarluyandi",
+ "ysc": "Yassic",
+ "ysd": "Samatao",
+ "ysg": "Sonaga",
+ "ysl": "Yugoslavian Sign Language",
+ "ysm": "Myanmar Sign Language",
+ "ysn": "Sani",
+ "yso": "Nisi (China)",
+ "ysp": "Southern Lolopo",
+ "ysr": "Sirenik Yupik",
+ "yss": "Yessan-Mayo",
+ "ysy": "Sanie",
+ "yta": "Talu",
+ "ytl": "Tanglang",
+ "ytp": "Thopho",
+ "ytw": "Yout Wam",
+ "yty": "Yatay",
+ "yua": "Yucateco; Yucatec Maya",
+ "yub": "Yugambal",
+ "yuc": "Yuchi",
+ "yud": "Judeo-Tripolitanian Arabic",
+ "yue": "Yue Chinese; Cantonese",
+ "yuf": "Havasupai-Walapai-Yavapai",
+ "yug": "Yug",
+ "yui": "Yurutí",
+ "yuj": "Karkar-Yuri",
+ "yuk": "Yuki",
+ "yul": "Yulu",
+ "yum": "Quechan",
+ "yun": "Bena (Nigeria)",
+ "yup": "Yukpa",
+ "yuq": "Yuqui",
+ "yur": "Yurok",
+ "yut": "Yopno",
+ "yuw": "Yau (Morobe Province)",
+ "yux": "Southern Yukaghir",
+ "yuy": "East Yugur",
+ "yuz": "Yuracare",
+ "yva": "Yawa",
+ "yvt": "Yavitero",
+ "ywa": "Kalou",
+ "ywg": "Yinhawangka",
+ "ywl": "Western Lalu",
+ "ywn": "Yawanawa",
+ "ywq": "Wuding-Luquan Yi",
+ "ywr": "Yawuru",
+ "ywt": "Xishanba Lalo; Central Lalo",
+ "ywu": "Wumeng Nasu",
+ "yww": "Yawarawarga",
+ "yxa": "Mayawali",
+ "yxg": "Yagara",
+ "yxl": "Yardliyawarra",
+ "yxm": "Yinwum",
+ "yxu": "Yuyu",
+ "yxy": "Yabula Yabula",
+ "yyr": "Yir Yoront",
+ "yyu": "Yau (Sandaun Province)",
+ "yyz": "Ayizi",
+ "yzg": "E'ma Buyang",
+ "yzk": "Zokhuo",
+ "za": "Zhuang; Chuang",
+ "zaa": "Sierra de Juárez Zapotec",
+ "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec",
+ "zac": "Ocotlán Zapotec",
+ "zad": "Cajonos Zapotec",
+ "zae": "Yareni Zapotec",
+ "zaf": "Ayoquesco Zapotec",
+ "zag": "Zaghawa",
+ "zah": "Zangwal",
+ "zai": "Isthmus Zapotec",
+ "zaj": "Zaramo",
+ "zak": "Zanaki",
+ "zal": "Zauzou",
+ "zam": "Miahuatlán Zapotec",
+ "zao": "Ozolotepec Zapotec",
+ "zap": "Zapotec",
+ "zaq": "Aloápam Zapotec",
+ "zar": "Rincón Zapotec",
+ "zas": "Santo Domingo Albarradas Zapotec",
+ "zat": "Tabaa Zapotec",
+ "zau": "Zangskari",
+ "zav": "Yatzachi Zapotec",
+ "zaw": "Mitla Zapotec",
+ "zax": "Xadani Zapotec",
+ "zay": "Zayse-Zergulla; Zaysete",
+ "zaz": "Zari",
+ "zba": "Balaibalan",
+ "zbc": "Central Berawan",
+ "zbe": "East Berawan",
+ "zbl": "Blissymbols; Bliss; Blissymbolics",
+ "zbt": "Batui",
+ "zbu": "Bu (Bauchi State)",
+ "zbw": "West Berawan",
+ "zca": "Coatecas Altas Zapotec",
+ "zcd": "Las Delicias Zapotec",
+ "zch": "Central Hongshuihe Zhuang",
+ "zdj": "Ngazidja Comorian",
+ "zea": "Zeeuws",
+ "zeg": "Zenag",
+ "zeh": "Eastern Hongshuihe Zhuang",
+ "zen": "Zenaga",
+ "zga": "Kinga",
+ "zgb": "Guibei Zhuang",
+ "zgh": "Standard Moroccan Tamazight",
+ "zgm": "Minz Zhuang",
+ "zgn": "Guibian Zhuang",
+ "zgr": "Magori",
+ "zh": "Chinese",
+ "zhb": "Zhaba",
+ "zhd": "Dai Zhuang",
+ "zhi": "Zhire",
+ "zhn": "Nong Zhuang",
+ "zhw": "Zhoa",
+ "zhx": "Chinese (family)",
+ "zia": "Zia",
+ "zib": "Zimbabwe Sign Language",
+ "zik": "Zimakani",
+ "zil": "Zialo",
+ "zim": "Mesme",
+ "zin": "Zinza",
+ "ziw": "Zigula",
+ "ziz": "Zizilivakan",
+ "zka": "Kaimbulawa",
+ "zkb": "Koibal",
+ "zkd": "Kadu",
+ "zkg": "Koguryo",
+ "zkh": "Khorezmian",
+ "zkk": "Karankawa",
+ "zkn": "Kanan",
+ "zko": "Kott",
+ "zkp": "São Paulo Kaingáng",
+ "zkr": "Zakhring",
+ "zkt": "Kitan",
+ "zku": "Kaurna",
+ "zkv": "Krevinian",
+ "zkz": "Khazar",
+ "zla": "Zula",
+ "zle": "East Slavic languages",
+ "zlj": "Liujiang Zhuang",
+ "zlm": "Malay (individual language)",
+ "zln": "Lianshan Zhuang",
+ "zlq": "Liuqian Zhuang",
+ "zls": "South Slavic languages",
+ "zlw": "West Slavic languages",
+ "zma": "Manda (Australia)",
+ "zmb": "Zimba",
+ "zmc": "Margany",
+ "zmd": "Maridan",
+ "zme": "Mangerr",
+ "zmf": "Mfinu",
+ "zmg": "Marti Ke",
+ "zmh": "Makolkol",
+ "zmi": "Negeri Sembilan Malay",
+ "zmj": "Maridjabin",
+ "zmk": "Mandandanyi",
+ "zml": "Matngala",
+ "zmm": "Marimanindji; Marramaninyshi",
+ "zmn": "Mbangwe",
+ "zmo": "Molo",
+ "zmp": "Mpuono",
+ "zmq": "Mituku",
+ "zmr": "Maranunggu",
+ "zms": "Mbesa",
+ "zmt": "Maringarr",
+ "zmu": "Muruwari",
+ "zmv": "Mbariman-Gudhinma",
+ "zmw": "Mbo (Democratic Republic of Congo)",
+ "zmx": "Bomitaba",
+ "zmy": "Mariyedi",
+ "zmz": "Mbandja",
+ "zna": "Zan Gula",
+ "znd": "Zande languages",
+ "zne": "Zande (individual language)",
+ "zng": "Mang",
+ "znk": "Manangkari",
+ "zns": "Mangas",
+ "zoc": "Copainalá Zoque",
+ "zoh": "Chimalapa Zoque",
+ "zom": "Zou",
+ "zoo": "Asunción Mixtepec Zapotec",
+ "zoq": "Tabasco Zoque",
+ "zor": "Rayón Zoque",
+ "zos": "Francisco León Zoque",
+ "zpa": "Lachiguiri Zapotec",
+ "zpb": "Yautepec Zapotec",
+ "zpc": "Choapan Zapotec",
+ "zpd": "Southeastern Ixtlán Zapotec",
+ "zpe": "Petapa Zapotec",
+ "zpf": "San Pedro Quiatoni Zapotec",
+ "zpg": "Guevea De Humboldt Zapotec",
+ "zph": "Totomachapan Zapotec",
+ "zpi": "Santa María Quiegolani Zapotec",
+ "zpj": "Quiavicuzas Zapotec",
+ "zpk": "Tlacolulita Zapotec",
+ "zpl": "Lachixío Zapotec",
+ "zpm": "Mixtepec Zapotec",
+ "zpn": "Santa Inés Yatzechi Zapotec",
+ "zpo": "Amatlán Zapotec",
+ "zpp": "El Alto Zapotec",
+ "zpq": "Zoogocho Zapotec",
+ "zpr": "Santiago Xanica Zapotec",
+ "zps": "Coatlán Zapotec",
+ "zpt": "San Vicente Coatlán Zapotec",
+ "zpu": "Yalálag Zapotec",
+ "zpv": "Chichicapan Zapotec",
+ "zpw": "Zaniza Zapotec",
+ "zpx": "San Baltazar Loxicha Zapotec",
+ "zpy": "Mazaltepec Zapotec",
+ "zpz": "Texmelucan Zapotec",
+ "zqe": "Qiubei Zhuang",
+ "zra": "Kara (Korea)",
+ "zrg": "Mirgan",
+ "zrn": "Zerenkel",
+ "zro": "Záparo",
+ "zrp": "Zarphatic",
+ "zrs": "Mairasi",
+ "zsa": "Sarasira",
+ "zsk": "Kaskean",
+ "zsl": "Zambian Sign Language",
+ "zsm": "Standard Malay",
+ "zsr": "Southern Rincon Zapotec",
+ "zsu": "Sukurum",
+ "zte": "Elotepec Zapotec",
+ "ztg": "Xanaguía Zapotec",
+ "ztl": "Lapaguía-Guivini Zapotec",
+ "ztm": "San Agustín Mixtepec Zapotec",
+ "ztn": "Santa Catarina Albarradas Zapotec",
+ "ztp": "Loxicha Zapotec",
+ "ztq": "Quioquitani-Quierí Zapotec",
+ "zts": "Tilquiapan Zapotec",
+ "ztt": "Tejalapan Zapotec",
+ "ztu": "Güilá Zapotec",
+ "ztx": "Zaachila Zapotec",
+ "zty": "Yatee Zapotec",
+ "zu": "Zulu",
+ "zua": "Zeem",
+ "zuh": "Tokano",
+ "zum": "Kumzari",
+ "zun": "Zuni",
+ "zuy": "Zumaya",
+ "zwa": "Zay",
+ "zyb": "Yongbei Zhuang",
+ "zyg": "Yang Zhuang",
+ "zyj": "Youjiang Zhuang",
+ "zyn": "Yongnan Zhuang",
+ "zyp": "Zyphe Chin",
+ "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki",
+ "zzj": "Zuojiang Zhuang"
+}
\ No newline at end of file
diff --git a/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json
new file mode 100644
index 0000000000000000000000000000000000000000..983ce0c10dbb2e2245f90ae47e9de4c1025d5bb1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/datasets/utils/resources/size_categories.json
@@ -0,0 +1,14 @@
+[
+ "unknown",
+ "n<1K",
+ "1K1T"
+]