diff --git a/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b5c3a409d7f329c9a44111dc1dba156710a57dba
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/15.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:929555ecf3a84a09dbb0adf6abce9c36ae0b9b84114921beac5066f33a45c7f9
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fba83efe62dc633af88a54c1b9a25ee90e9fcc98
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/17.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0509bc0ff365fd6e8cb292534fb79e5ac1abfa14165d165555b1dd7f194d0aaa
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/18.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/18.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8314cf00268c190befb12f5dde747726fff64afc
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aeaa31a4a99e6eba1e9955db4c27f1412e0ea156029115bc4691f1684455a2b6
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..49cdaae226684d3535f08394b25fcd32ff4cab97
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a546964f06449aeeebda85ccb697d5601209f9c88ecbedcb1a012bcee820eca
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/9.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/9.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8a0436b277e7c36d4b2ff9392226b1f08efa1f7f
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/9.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0f1da3f760ef72009420595c4abe76a9a6633487215813eacadbdd377feb6c6
+size 33555533
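The checkpoint tensors above are tracked with Git LFS, so each added file is only a three-line pointer (spec version, `oid sha256:...`, and `size` in bytes) rather than the weights themselves. A minimal sketch for reading such a pointer locally; the helper name and example path are illustrative, not part of this change:

```py
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into {'version', 'oid', 'size'}."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

# Example (hypothetical local checkout):
# info = parse_lfs_pointer("ckpts/universal/global_step20/zero/18.attention.dense.weight/fp32.pt")
# print(info["size"])  # e.g. 16778317 bytes
```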
diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1a712717f13598756f30185b7df5aff375870eb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61dac93986cb196e497116375ece5f45786b8cb7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/convert_to_parquet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58b779f41d7ea7c528b867b1c68ce4f4c12f2b78
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/commands/__pycache__/test.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__init__.py b/venv/lib/python3.10/site-packages/datasets/download/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ae0d436504dc2e609b0ca8851509c72a161dbde
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/download/__init__.py
@@ -0,0 +1,10 @@
+__all__ = [
+ "DownloadConfig",
+ "DownloadManager",
+ "DownloadMode",
+ "StreamingDownloadManager",
+]
+
+from .download_config import DownloadConfig
+from .download_manager import DownloadManager, DownloadMode
+from .streaming_download_manager import StreamingDownloadManager
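The new `datasets/download/__init__.py` defines the public surface of the download subpackage. A small sketch of importing it downstream (the dataset name is a placeholder):

```py
from datasets.download import DownloadConfig, DownloadManager, DownloadMode, StreamingDownloadManager

# Non-streaming manager with a custom download configuration.
dl_manager = DownloadManager(dataset_name="demo", download_config=DownloadConfig(max_retries=3))

# The streaming variant exposes the same high-level interface without materializing files.
streaming_dl_manager = StreamingDownloadManager(dataset_name="demo")

assert DownloadMode.FORCE_REDOWNLOAD.value == "force_redownload"
```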
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7befcf6c784d40d8bf3f339572bc70d0e30cd4e8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f767d1428434727ca811978c6eb833f071583735
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_config.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0abd6927c4128fa0361785d7fda5407d83e3c6e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/download_manager.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67ff8e5e0cee90b0de1c9e756ee9501248b83320
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9491777063cff1451fe6a4186940ec7ce81ae79
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/download/download_config.py b/venv/lib/python3.10/site-packages/datasets/download/download_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1fdf9be6a2379a9f3a2d0933aebe3c45689f337
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/download/download_config.py
@@ -0,0 +1,108 @@
+import copy
+import warnings
+from dataclasses import InitVar, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+from .. import config
+
+
+@dataclass
+class DownloadConfig:
+ """Configuration for our cached path manager.
+
+ Attributes:
+ cache_dir (`str` or `Path`, *optional*):
+ Specify a cache directory to save the file to (overwrite the
+ default cache dir).
+ force_download (`bool`, defaults to `False`):
+            If `True`, re-download the file even if it's already cached in
+ the cache dir.
+ resume_download (`bool`, defaults to `False`):
+ If `True`, resume the download if an incompletely received file is
+ found.
+ proxies (`dict`, *optional*):
+ user_agent (`str`, *optional*):
+            Optional string that will be appended to the user-agent on remote
+ requests.
+ extract_compressed_file (`bool`, defaults to `False`):
+            If `True` and the path points to a zip or tar file,
+            extract the compressed file in a folder alongside the archive.
+ force_extract (`bool`, defaults to `False`):
+ If `True` when `extract_compressed_file` is `True` and the archive
+ was already extracted, re-extract the archive and override the folder where it was extracted.
+ delete_extracted (`bool`, defaults to `False`):
+ Whether to delete (or keep) the extracted files.
+ extract_on_the_fly (`bool`, defaults to `False`):
+ If `True`, extract compressed files while they are being read.
+ use_etag (`bool`, defaults to `True`):
+ Whether to use the ETag HTTP response header to validate the cached files.
+ num_proc (`int`, *optional*):
+ The number of processes to launch to download the files in parallel.
+        max_retries (`int`, defaults to `1`):
+ The number of times to retry an HTTP request if it fails.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token
+ for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
+
+            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ ignore_url_params (`bool`, defaults to `False`):
+ Whether to strip all query parameters and fragments from
+ the download URL before using it for caching the file.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ download_desc (`str`, *optional*):
+ A description to be displayed alongside with the progress bar while downloading the files.
+ disable_tqdm (`bool`, defaults to `False`):
+ Whether to disable the individual files download progress bar
+ """
+
+ cache_dir: Optional[Union[str, Path]] = None
+ force_download: bool = False
+ resume_download: bool = False
+ local_files_only: bool = False
+ proxies: Optional[Dict] = None
+ user_agent: Optional[str] = None
+ extract_compressed_file: bool = False
+ force_extract: bool = False
+ delete_extracted: bool = False
+ extract_on_the_fly: bool = False
+ use_etag: bool = True
+ num_proc: Optional[int] = None
+ max_retries: int = 1
+ token: Optional[Union[str, bool]] = None
+ use_auth_token: InitVar[Optional[Union[str, bool]]] = "deprecated"
+ ignore_url_params: bool = False
+ storage_options: Dict[str, Any] = field(default_factory=dict)
+ download_desc: Optional[str] = None
+ disable_tqdm: bool = False
+
+ def __post_init__(self, use_auth_token):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ self.token = use_auth_token
+ if "hf" not in self.storage_options:
+ self.storage_options["hf"] = {"token": self.token, "endpoint": config.HF_ENDPOINT}
+
+ def copy(self) -> "DownloadConfig":
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+ def __setattr__(self, name, value):
+ if name == "token" and getattr(self, "storage_options", None) is not None:
+ if "hf" not in self.storage_options:
+ self.storage_options["hf"] = {"token": value, "endpoint": config.HF_ENDPOINT}
+            elif self.storage_options["hf"].get("token") is None:
+ self.storage_options["hf"]["token"] = value
+ super().__setattr__(name, value)
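As a usage sketch for the `DownloadConfig` dataclass added above; the cache directory and token values are placeholders:

```py
from datasets.download import DownloadConfig

config = DownloadConfig(
    cache_dir="/tmp/hf_downloads",      # placeholder path
    max_retries=3,
    num_proc=4,
    download_desc="Downloading raw shards",
)

# __post_init__ seeds storage_options["hf"] with the token and HF endpoint,
# so fsspec-based Hub filesystems pick them up automatically.
print(config.storage_options["hf"]["endpoint"])

# copy() deep-copies every field, so mutating the copy leaves the original intact.
other = config.copy()
other.storage_options["hf"]["token"] = "hf_xxx"          # placeholder token
assert config.storage_options["hf"]["token"] is None
```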
diff --git a/venv/lib/python3.10/site-packages/datasets/download/download_manager.py b/venv/lib/python3.10/site-packages/datasets/download/download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6c45d94c15209aefdd5bd7ef300b30c1e0f1f56
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/download/download_manager.py
@@ -0,0 +1,448 @@
+# Copyright 2020 The TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Download manager interface."""
+
+import enum
+import io
+import multiprocessing
+import os
+import posixpath
+import warnings
+from datetime import datetime
+from functools import partial
+from typing import Dict, List, Optional, Union
+
+import fsspec
+from fsspec.core import url_to_fs
+from tqdm.contrib.concurrent import thread_map
+
+from .. import config
+from ..utils import tqdm as hf_tqdm
+from ..utils.deprecation_utils import DeprecatedEnum, deprecated
+from ..utils.file_utils import (
+ ArchiveIterable,
+ FilesIterable,
+ cached_path,
+ get_from_cache,
+ hash_url_to_filename,
+ is_relative_path,
+ stack_multiprocessing_download_progress_bars,
+ url_or_path_join,
+)
+from ..utils.info_utils import get_size_checksum_dict
+from ..utils.logging import get_logger, tqdm
+from ..utils.py_utils import NestedDataStructure, map_nested, size_str
+from ..utils.track import tracked_str
+from .download_config import DownloadConfig
+
+
+logger = get_logger(__name__)
+
+
+class DownloadMode(enum.Enum):
+ """`Enum` for how to treat pre-existing downloads and data.
+
+ The default mode is `REUSE_DATASET_IF_EXISTS`, which will reuse both
+ raw downloads and the prepared dataset if they exist.
+
+    The generation modes:
+
+ | | Downloads | Dataset |
+ |-------------------------------------|-----------|---------|
+ | `REUSE_DATASET_IF_EXISTS` (default) | Reuse | Reuse |
+ | `REUSE_CACHE_IF_EXISTS` | Reuse | Fresh |
+ | `FORCE_REDOWNLOAD` | Fresh | Fresh |
+
+ """
+
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
+ FORCE_REDOWNLOAD = "force_redownload"
+
+
+class GenerateMode(DeprecatedEnum):
+ REUSE_DATASET_IF_EXISTS = "reuse_dataset_if_exists"
+ REUSE_CACHE_IF_EXISTS = "reuse_cache_if_exists"
+ FORCE_REDOWNLOAD = "force_redownload"
+
+ @property
+ def help_message(self):
+ return "Use 'DownloadMode' instead."
+
+
+class DownloadManager:
+ is_streaming = False
+
+ def __init__(
+ self,
+ dataset_name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ base_path: Optional[str] = None,
+ record_checksums=True,
+ ):
+ """Download manager constructor.
+
+ Args:
+ data_dir:
+ can be used to specify a manual directory to get the files from.
+ dataset_name (`str`):
+                Name of the dataset this instance will be used for. If
+                provided, downloads are recorded with the dataset they were used for.
+ download_config (`DownloadConfig`):
+ to specify the cache directory and other
+ download options
+ base_path (`str`):
+ base path that is used when relative paths are used to
+ download files. This can be a remote url.
+ record_checksums (`bool`, defaults to `True`):
+ Whether to record the checksums of the downloaded files. If None, the value is inferred from the builder.
+ """
+ self._dataset_name = dataset_name
+ self._data_dir = data_dir
+ self._base_path = base_path or os.path.abspath(".")
+ # To record what is being used: {url: {num_bytes: int, checksum: str}}
+ self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {}
+ self.record_checksums = record_checksums
+ self.download_config = download_config or DownloadConfig()
+ self.downloaded_paths = {}
+ self.extracted_paths = {}
+
+ @property
+ def manual_dir(self):
+ return self._data_dir
+
+ @property
+ def downloaded_size(self):
+ """Returns the total size of downloaded files."""
+ return sum(checksums_dict["num_bytes"] for checksums_dict in self._recorded_sizes_checksums.values())
+
+ @staticmethod
+ def ship_files_with_pipeline(downloaded_path_or_paths, pipeline):
+ """Ship the files using Beam FileSystems to the pipeline temp dir.
+
+ Args:
+ downloaded_path_or_paths (`str` or `list[str]` or `dict[str, str]`):
+ Nested structure containing the
+ downloaded path(s).
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam Pipeline.
+
+ Returns:
+ `str` or `list[str]` or `dict[str, str]`
+ """
+ from ..utils.beam_utils import upload_local_to_remote
+
+ remote_dir = pipeline._options.get_all_options().get("temp_location")
+ if remote_dir is None:
+ raise ValueError("You need to specify 'temp_location' in PipelineOptions to upload files")
+
+ def upload(local_file_path):
+ remote_file_path = posixpath.join(
+ remote_dir, config.DOWNLOADED_DATASETS_DIR, os.path.basename(local_file_path)
+ )
+ logger.info(
+ f"Uploading {local_file_path} ({size_str(os.path.getsize(local_file_path))}) to {remote_file_path}."
+ )
+ upload_local_to_remote(local_file_path, remote_file_path)
+ return remote_file_path
+
+ uploaded_path_or_paths = map_nested(
+ lambda local_file_path: upload(local_file_path),
+ downloaded_path_or_paths,
+ )
+ return uploaded_path_or_paths
+
+ def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure):
+ """Record size/checksum of downloaded files."""
+ delay = 5
+ for url, path in hf_tqdm(
+ list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())),
+ delay=delay,
+ desc="Computing checksums",
+ ):
+ # call str to support PathLike objects
+ self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(
+ path, record_checksum=self.record_checksums
+ )
+
+ @deprecated("Use `.download`/`.download_and_extract` with `fsspec` URLs instead.")
+ def download_custom(self, url_or_urls, custom_download):
+ """
+ Download given urls(s) by calling `custom_download`.
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
+ custom_download (`Callable[src_url, dst_path]`):
+                Callable that takes a source URL and a destination path. For example
+                `tf.io.gfile.copy`, which lets you download from Google Storage.
+
+ Returns:
+ downloaded_path(s): `str`, The downloaded paths matching the given input
+ `url_or_urls`.
+
+ Example:
+
+ ```py
+ >>> downloaded_files = dl_manager.download_custom('s3://my-bucket/data.zip', custom_download_for_my_private_bucket)
+ ```
+ """
+ cache_dir = self.download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
+ max_retries = self.download_config.max_retries
+
+ def url_to_downloaded_path(url):
+ return os.path.join(cache_dir, hash_url_to_filename(url))
+
+ downloaded_path_or_paths = map_nested(url_to_downloaded_path, url_or_urls)
+ url_or_urls = NestedDataStructure(url_or_urls)
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
+ for url, path in zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()):
+ try:
+ get_from_cache(
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
+ )
+ cached = True
+ except FileNotFoundError:
+ cached = False
+ if not cached or self.download_config.force_download:
+ custom_download(url, path)
+ get_from_cache(
+ url, cache_dir=cache_dir, local_files_only=True, use_etag=False, max_retries=max_retries
+ )
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
+ return downloaded_path_or_paths.data
+
+ def download(self, url_or_urls):
+ """Download given URL(s).
+
+ By default, only one process is used for download. Pass customized `download_config.num_proc` to change this behavior.
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+ URL or `list` or `dict` of URLs to download. Each URL is a `str`.
+
+ Returns:
+ `str` or `list` or `dict`:
+ The downloaded paths matching the given input `url_or_urls`.
+
+ Example:
+
+ ```py
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ ```
+ """
+ download_config = self.download_config.copy()
+ download_config.extract_compressed_file = False
+ if download_config.download_desc is None:
+ download_config.download_desc = "Downloading data"
+
+ download_func = partial(self._download_batched, download_config=download_config)
+
+ start_time = datetime.now()
+ with stack_multiprocessing_download_progress_bars():
+ downloaded_path_or_paths = map_nested(
+ download_func,
+ url_or_urls,
+ map_tuple=True,
+ num_proc=download_config.num_proc,
+ desc="Downloading data files",
+ batched=True,
+ batch_size=-1,
+ )
+ duration = datetime.now() - start_time
+ logger.info(f"Downloading took {duration.total_seconds() // 60} min")
+ url_or_urls = NestedDataStructure(url_or_urls)
+ downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
+ self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))
+
+ start_time = datetime.now()
+ self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
+ duration = datetime.now() - start_time
+ logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")
+
+ return downloaded_path_or_paths.data
+
+ def _download_batched(
+ self,
+ url_or_filenames: List[str],
+ download_config: DownloadConfig,
+ ) -> List[str]:
+ if len(url_or_filenames) >= 16:
+ download_config = download_config.copy()
+ download_config.disable_tqdm = True
+ download_func = partial(self._download_single, download_config=download_config)
+
+ fs: fsspec.AbstractFileSystem
+ fs, path = url_to_fs(url_or_filenames[0], **download_config.storage_options)
+ size = 0
+ try:
+ size = fs.info(path).get("size", 0)
+ except Exception:
+ pass
+ max_workers = (
+ config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < (20 << 20) else 1
+ ) # enable multithreading if files are small
+
+ return thread_map(
+ download_func,
+ url_or_filenames,
+ desc=download_config.download_desc or "Downloading",
+ unit="files",
+ position=multiprocessing.current_process()._identity[-1] # contains the ranks of subprocesses
+ if os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
+ and multiprocessing.current_process()._identity
+ else None,
+ max_workers=max_workers,
+ tqdm_class=tqdm,
+ )
+ else:
+ return [
+ self._download_single(url_or_filename, download_config=download_config)
+ for url_or_filename in url_or_filenames
+ ]
+
+ def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str:
+ url_or_filename = str(url_or_filename)
+ if is_relative_path(url_or_filename):
+ # append the relative path to the base_path
+ url_or_filename = url_or_path_join(self._base_path, url_or_filename)
+ out = cached_path(url_or_filename, download_config=download_config)
+ out = tracked_str(out)
+ out.set_origin(url_or_filename)
+ return out
+
+ def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]):
+ """Iterate over files within an archive.
+
+ Args:
+ path_or_buf (`str` or `io.BufferedReader`):
+ Archive path or archive binary file object.
+
+ Yields:
+ `tuple[str, io.BufferedReader]`:
+ 2-tuple (path_within_archive, file_object).
+ File object is opened in binary mode.
+
+ Example:
+
+ ```py
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ >>> files = dl_manager.iter_archive(archive)
+ ```
+ """
+
+ if hasattr(path_or_buf, "read"):
+ return ArchiveIterable.from_buf(path_or_buf)
+ else:
+ return ArchiveIterable.from_urlpath(path_or_buf)
+
+ def iter_files(self, paths: Union[str, List[str]]):
+ """Iterate over file paths.
+
+ Args:
+ paths (`str` or `list` of `str`):
+ Root paths.
+
+ Yields:
+ `str`: File path.
+
+ Example:
+
+ ```py
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
+ >>> files = dl_manager.iter_files(files)
+ ```
+ """
+ return FilesIterable.from_urlpaths(paths)
+
+ def extract(self, path_or_paths, num_proc="deprecated"):
+ """Extract given path(s).
+
+ Args:
+ path_or_paths (path or `list` or `dict`):
+ Path of file to extract. Each path is a `str`.
+ num_proc (`int`):
+ Use multi-processing if `num_proc` > 1 and the length of
+ `path_or_paths` is larger than `num_proc`.
+
+                `num_proc` was deprecated in version 2.6.2 and will be removed in 3.0.0.
+                Pass `DownloadConfig(num_proc=)` to the initializer instead.
+
+ Returns:
+ extracted_path(s): `str`, The extracted paths matching the given input
+ path_or_paths.
+
+ Example:
+
+ ```py
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ >>> extracted_files = dl_manager.extract(downloaded_files)
+ ```
+ """
+ if num_proc != "deprecated":
+ warnings.warn(
+ "'num_proc' was deprecated in version 2.6.2 and will be removed in 3.0.0. Pass `DownloadConfig(num_proc=)` to the initializer instead.",
+ FutureWarning,
+ )
+ download_config = self.download_config.copy()
+ download_config.extract_compressed_file = True
+ extract_func = partial(self._download_single, download_config=download_config)
+ extracted_paths = map_nested(
+ extract_func,
+ path_or_paths,
+ num_proc=download_config.num_proc,
+ desc="Extracting data files",
+ )
+ path_or_paths = NestedDataStructure(path_or_paths)
+ extracted_paths = NestedDataStructure(extracted_paths)
+ self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten())))
+ return extracted_paths.data
+
+ def download_and_extract(self, url_or_urls):
+ """Download and extract given `url_or_urls`.
+
+ Is roughly equivalent to:
+
+ ```
+ extracted_paths = dl_manager.extract(dl_manager.download(url_or_urls))
+ ```
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+ URL or `list` or `dict` of URLs to download and extract. Each URL is a `str`.
+
+ Returns:
+ extracted_path(s): `str`, extracted paths of given URL(s).
+ """
+ return self.extract(self.download(url_or_urls))
+
+ def get_recorded_sizes_checksums(self):
+ return self._recorded_sizes_checksums.copy()
+
+ def delete_extracted_files(self):
+ paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values())
+ for key, path in list(self.extracted_paths.items()):
+ if path in paths_to_delete and os.path.isfile(path):
+ os.remove(path)
+ del self.extracted_paths[key]
+
+ def manage_extracted_files(self):
+ if self.download_config.delete_extracted:
+ self.delete_extracted_files()
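A short sketch of how a dataset builder typically drives the non-streaming `DownloadManager` above from `_split_generators`; the URLs and dataset name are placeholders and the calls would hit the network when run:

```py
from datasets.download import DownloadConfig, DownloadManager

dl_manager = DownloadManager(
    dataset_name="demo",                                  # placeholder name
    download_config=DownloadConfig(num_proc=2),
)

# download() caches the files and records size/checksum bookkeeping;
# nested inputs (dict/list) come back with the same structure.
paths = dl_manager.download(
    {
        "train": "https://example.com/train.json.gz",     # placeholder URLs
        "test": "https://example.com/test.json.gz",
    }
)

# extract() reuses the same cached_path machinery with extraction enabled,
# and download_and_extract() chains the two steps.
extracted = dl_manager.extract(paths)
archive = dl_manager.download_and_extract("https://example.com/data.zip")

print(dl_manager.downloaded_size)                         # total bytes recorded so far
print(dl_manager.get_recorded_sizes_checksums())
```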
diff --git a/venv/lib/python3.10/site-packages/datasets/download/mock_download_manager.py b/venv/lib/python3.10/site-packages/datasets/download/mock_download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c71103a536b2a725ebb1d1dfe239b80baedc740
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/download/mock_download_manager.py
@@ -0,0 +1,244 @@
+# Copyright 2020 The TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Mock download manager interface."""
+
+import os
+import re
+import urllib.parse
+from pathlib import Path
+from typing import Callable, List, Optional, Union
+from zipfile import ZipFile
+
+from ..utils.file_utils import cached_path, hf_github_url
+from ..utils.logging import get_logger
+from ..utils.version import Version
+
+
+logger = get_logger(__name__)
+
+
+class MockDownloadManager:
+ dummy_file_name = "dummy_data"
+ datasets_scripts_dir = "datasets"
+ is_streaming = False
+
+ def __init__(
+ self,
+ dataset_name: str,
+ config: str,
+ version: Union[Version, str],
+ cache_dir: Optional[str] = None,
+ use_local_dummy_data: bool = False,
+ load_existing_dummy_data: bool = True,
+ download_callbacks: Optional[List[Callable]] = None,
+ ):
+ self.downloaded_size = 0
+ self.dataset_name = dataset_name
+ self.cache_dir = cache_dir
+ self.use_local_dummy_data = use_local_dummy_data
+ self.config = config
+ # download_callbacks take a single url as input
+ self.download_callbacks: List[Callable] = download_callbacks or []
+ # if False, it doesn't load existing files and it returns the paths of the dummy files relative
+ # to the dummy_data zip file root
+ self.load_existing_dummy_data = load_existing_dummy_data
+
+ # TODO(PVP, QL) might need to make this more general
+ self.version_name = str(version)
+ # to be downloaded
+ self._dummy_file = None
+ self._bucket_url = None
+
+ @property
+ def dummy_file(self):
+ if self._dummy_file is None:
+ self._dummy_file = self.download_dummy_data()
+ return self._dummy_file
+
+ @property
+ def dummy_data_folder(self):
+ if self.config is not None:
+ # structure is dummy / config_name / version_name
+ return os.path.join("dummy", self.config.name, self.version_name)
+ # structure is dummy / version_name
+ return os.path.join("dummy", self.version_name)
+
+ @property
+ def dummy_zip_file(self):
+ return os.path.join(self.dummy_data_folder, "dummy_data.zip")
+
+ def download_dummy_data(self):
+ path_to_dummy_data_dir = (
+ self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
+ )
+
+ local_path = cached_path(
+ path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
+ )
+
+ return os.path.join(local_path, self.dummy_file_name)
+
+ @property
+ def local_path_to_dummy_data(self):
+ return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
+
+ @property
+ def github_path_to_dummy_data(self):
+ if self._bucket_url is None:
+ self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
+ return self._bucket_url
+
+ @property
+ def manual_dir(self):
+        # return the full path if it's a dir
+ if os.path.isdir(self.dummy_file):
+ return self.dummy_file
+ # else cut off path to file -> example `xsum`.
+ return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
+
+ # this function has to be in the manager under this name so that testing works
+ def download_and_extract(self, data_url, *args):
+ if self.load_existing_dummy_data:
+ # dummy data is downloaded and tested
+ dummy_file = self.dummy_file
+ else:
+ # dummy data cannot be downloaded and only the path to dummy file is returned
+ dummy_file = self.dummy_file_name
+
+ # special case when data_url is a dict
+ if isinstance(data_url, dict):
+ return self.create_dummy_data_dict(dummy_file, data_url)
+ elif isinstance(data_url, (list, tuple)):
+ return self.create_dummy_data_list(dummy_file, data_url)
+ else:
+ return self.create_dummy_data_single(dummy_file, data_url)
+
+ # this function has to be in the manager under this name so that testing works
+ def download(self, data_url, *args):
+ return self.download_and_extract(data_url)
+
+ # this function has to be in the manager under this name so that testing works
+ def download_custom(self, data_url, custom_download):
+ return self.download_and_extract(data_url)
+
+ # this function has to be in the manager under this name so that testing works
+ def extract(self, path, *args, **kwargs):
+ return path
+
+ # this function has to be in the manager under this name so that testing works
+ def get_recorded_sizes_checksums(self):
+ return {}
+
+ def create_dummy_data_dict(self, path_to_dummy_data, data_url):
+ dummy_data_dict = {}
+ for key, single_urls in data_url.items():
+ for download_callback in self.download_callbacks:
+ if isinstance(single_urls, list):
+ for single_url in single_urls:
+ download_callback(single_url)
+ else:
+ single_url = single_urls
+ download_callback(single_url)
+ # we force the name of each key to be the last file / folder name of the url path
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
+ if isinstance(single_urls, list):
+ value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
+ else:
+ single_url = single_urls
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
+ dummy_data_dict[key] = value
+
+ # make sure that values are unique
+ if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
+ dummy_data_dict.values()
+ ):
+ # append key to value to make its name unique
+ dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
+
+ return dummy_data_dict
+
+ def create_dummy_data_list(self, path_to_dummy_data, data_url):
+ dummy_data_list = []
+ # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
+ is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
+ is_pubmed_records = all(
+ url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
+ )
+ if data_url and (is_tf_records or is_pubmed_records):
+ data_url = [data_url[0]] * len(data_url)
+ for single_url in data_url:
+ for download_callback in self.download_callbacks:
+ download_callback(single_url)
+ # we force the name of each key to be the last file / folder name of the url path
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
+ dummy_data_list.append(value)
+ return dummy_data_list
+
+ def create_dummy_data_single(self, path_to_dummy_data, data_url):
+ for download_callback in self.download_callbacks:
+ download_callback(data_url)
+ # we force the name of each key to be the last file / folder name of the url path
+ # if the url has arguments, we need to encode them with urllib.parse.quote_plus
+ value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
+ if os.path.exists(value) or not self.load_existing_dummy_data:
+ return value
+ else:
+ # Backward compatibility, maybe deprecate at one point.
+            # For many datasets with a single URL call to dl_manager.download_and_extract,
+            # the dummy_data.zip file is actually the zipped downloaded file,
+            # while we now expect the dummy_data.zip file to be a directory containing
+ # the downloaded file.
+ return path_to_dummy_data
+
+ def delete_extracted_files(self):
+ pass
+
+ def manage_extracted_files(self):
+ pass
+
+ def iter_archive(self, path):
+ def _iter_archive_members(path):
+ # this preserves the order of the members inside the ZIP archive
+ dummy_parent_path = Path(self.dummy_file).parent
+ relative_path = path.relative_to(dummy_parent_path)
+ with ZipFile(self.local_path_to_dummy_data) as zip_file:
+ members = zip_file.namelist()
+ for member in members:
+ if member.startswith(relative_path.as_posix()):
+ yield dummy_parent_path.joinpath(member)
+
+ path = Path(path)
+ file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
+ for file_path in file_paths:
+ if file_path.is_file() and not file_path.name.startswith((".", "__")):
+ yield file_path.relative_to(path).as_posix(), file_path.open("rb")
+
+ def iter_files(self, paths):
+ if not isinstance(paths, list):
+ paths = [paths]
+ for path in paths:
+ if os.path.isfile(path):
+ yield path
+ else:
+ for dirpath, dirnames, filenames in os.walk(path):
+ if os.path.basename(dirpath).startswith((".", "__")):
+ continue
+ dirnames.sort()
+ for filename in sorted(filenames):
+ if filename.startswith((".", "__")):
+ continue
+ yield os.path.join(dirpath, filename)
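The `MockDownloadManager` mirrors the real manager's method names but resolves every URL against a `dummy_data.zip` archive shipped next to the dataset script, which is how the dataset tests avoid network access. A rough sketch under the assumption that such a dummy archive exists locally (names and URLs are illustrative):

```py
from datasets.download.mock_download_manager import MockDownloadManager

mock_dl_manager = MockDownloadManager(
    dataset_name="my_dataset",     # illustrative dataset script name
    config=None,                   # or a BuilderConfig when the dataset defines configs
    version="1.0.0",
    use_local_dummy_data=True,     # read dummy_data.zip from the local datasets/ checkout
)

# download_and_extract() never downloads the real data: it maps each URL onto a
# path inside the extracted dummy_data folder, preserving the dict/list structure.
paths = mock_dl_manager.download_and_extract(
    {"train": "https://example.com/train.csv", "test": "https://example.com/test.csv"}
)
# e.g. {"train": ".../dummy_data/train.csv", "test": ".../dummy_data/test.csv"}
```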
diff --git a/venv/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py b/venv/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a1d1e3a53cfc1db3e873e1f9ab3da9a157db3c0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/download/streaming_download_manager.py
@@ -0,0 +1,210 @@
+import io
+import os
+from typing import Iterable, List, Optional, Tuple, Union
+
+from ..utils.file_utils import ( # noqa: F401 # backward compatibility
+ SINGLE_FILE_COMPRESSION_PROTOCOLS,
+ ArchiveIterable,
+ FilesIterable,
+ _get_extraction_protocol,
+ _get_path_extension,
+ _prepare_path_and_storage_options,
+ is_relative_path,
+ url_or_path_join,
+ xbasename,
+ xdirname,
+ xet_parse,
+ xexists,
+ xgetsize,
+ xglob,
+ xgzip_open,
+ xisdir,
+ xisfile,
+ xjoin,
+ xlistdir,
+ xnumpy_load,
+ xopen,
+ xpandas_read_csv,
+ xpandas_read_excel,
+ xPath,
+ xpyarrow_parquet_read_table,
+ xrelpath,
+ xsio_loadmat,
+ xsplit,
+ xsplitext,
+ xwalk,
+ xxml_dom_minidom_parse,
+)
+from ..utils.logging import get_logger
+from ..utils.py_utils import map_nested
+from .download_config import DownloadConfig
+
+
+logger = get_logger(__name__)
+
+
+class StreamingDownloadManager:
+ """
+ Download manager that uses the "::" separator to navigate through (possibly remote) compressed archives.
+    Contrary to the regular `DownloadManager`, the `download` and `extract` methods don't actually download or extract
+    data; instead they return the path or URL that can be opened using the `xopen` function, which extends the
+    built-in `open` function to stream data from remote files.
+ """
+
+ is_streaming = True
+
+ def __init__(
+ self,
+ dataset_name: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ base_path: Optional[str] = None,
+ ):
+ self._dataset_name = dataset_name
+ self._data_dir = data_dir
+ self._base_path = base_path or os.path.abspath(".")
+ self.download_config = download_config or DownloadConfig()
+
+ @property
+ def manual_dir(self):
+ return self._data_dir
+
+ def download(self, url_or_urls):
+ """Normalize URL(s) of files to stream data from.
+ This is the lazy version of `DownloadManager.download` for streaming.
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+ URL(s) of files to stream data from. Each url is a `str`.
+
+ Returns:
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input url_or_urls.
+
+ Example:
+
+ ```py
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ ```
+ """
+ url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True)
+ return url_or_urls
+
+ def _download_single(self, urlpath: str) -> str:
+ urlpath = str(urlpath)
+ if is_relative_path(urlpath):
+ # append the relative path to the base_path
+ urlpath = url_or_path_join(self._base_path, urlpath)
+ return urlpath
+
+ def extract(self, url_or_urls):
+ """Add extraction protocol for given url(s) for streaming.
+
+ This is the lazy version of `DownloadManager.extract` for streaming.
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+ URL(s) of files to stream data from. Each url is a `str`.
+
+ Returns:
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
+
+ Example:
+
+ ```py
+ >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ >>> extracted_files = dl_manager.extract(downloaded_files)
+ ```
+ """
+ urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True)
+ return urlpaths
+
+ def _extract(self, urlpath: str) -> str:
+ urlpath = str(urlpath)
+ protocol = _get_extraction_protocol(urlpath, download_config=self.download_config)
+ # get inner file: zip://train-00000.json.gz::https://foo.bar/data.zip -> zip://train-00000.json.gz
+ path = urlpath.split("::")[0]
+ extension = _get_path_extension(path)
+ if extension in ["tgz", "tar"] or path.endswith((".tar.gz", ".tar.bz2", ".tar.xz")):
+ raise NotImplementedError(
+ f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. "
+ f"Please use `dl_manager.iter_archive` instead.\n\n"
+ f"Example usage:\n\n"
+ f"\turl = dl_manager.download(url)\n"
+ f"\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n"
+ f"\tfor filename, file in tar_archive_iterator:\n"
+ f"\t\t..."
+ )
+ if protocol is None:
+ # no extraction
+ return urlpath
+ elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS:
+ # there is one single file which is the uncompressed file
+ inner_file = os.path.basename(urlpath.split("::")[0])
+ inner_file = inner_file[: inner_file.rindex(".")] if "." in inner_file else inner_file
+ return f"{protocol}://{inner_file}::{urlpath}"
+ else:
+ return f"{protocol}://::{urlpath}"
+
+ def download_and_extract(self, url_or_urls):
+ """Prepare given `url_or_urls` for streaming (add extraction protocol).
+
+ This is the lazy version of `DownloadManager.download_and_extract` for streaming.
+
+ Is equivalent to:
+
+ ```
+ urls = dl_manager.extract(dl_manager.download(url_or_urls))
+ ```
+
+ Args:
+ url_or_urls (`str` or `list` or `dict`):
+                URL(s) of files to stream data from. Each url is a `str`.
+
+ Returns:
+ url(s): (`str` or `list` or `dict`), URL(s) to stream data from matching the given input `url_or_urls`.
+ """
+ return self.extract(self.download(url_or_urls))
+
+ def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]:
+ """Iterate over files within an archive.
+
+ Args:
+ urlpath_or_buf (`str` or `io.BufferedReader`):
+ Archive path or archive binary file object.
+
+ Yields:
+ `tuple[str, io.BufferedReader]`:
+ 2-tuple (path_within_archive, file_object).
+ File object is opened in binary mode.
+
+ Example:
+
+ ```py
+ >>> archive = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')
+ >>> files = dl_manager.iter_archive(archive)
+ ```
+ """
+
+ if hasattr(urlpath_or_buf, "read"):
+ return ArchiveIterable.from_buf(urlpath_or_buf)
+ else:
+ return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config)
+
+ def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]:
+ """Iterate over files.
+
+ Args:
+ urlpaths (`str` or `list` of `str`):
+ Root paths.
+
+ Yields:
+ str: File URL path.
+
+ Example:
+
+ ```py
+ >>> files = dl_manager.download_and_extract('https://huggingface.co/datasets/beans/resolve/main/data/train.zip')
+ >>> files = dl_manager.iter_files(files)
+ ```
+ """
+ return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config)
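The streaming manager never materializes files: `download()` only normalizes URLs and `extract()` prepends an fsspec chaining protocol. A small sketch of the resulting URL form (the URL is a placeholder):

```py
from datasets.download import DownloadConfig, StreamingDownloadManager

dl_manager = StreamingDownloadManager(download_config=DownloadConfig())

# For a gzip-compressed file, extract() returns a chained URL along the lines of
# "gzip://data.json::https://example.com/data.json.gz".
url = dl_manager.download_and_extract("https://example.com/data.json.gz")
print(url)

# The chained URL can then be opened lazily with the xopen helper, e.g.:
# from datasets.utils.file_utils import xopen
# with xopen(url, "rb", download_config=dl_manager.download_config) as f:
#     first_bytes = f.read(16)
```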
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7062dd9897912dba28a6ba4504c4471804f8e95
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dfbc0b6e83e7c2d6ac0e2a7bf41b2c3f1453616
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/__pycache__/cache.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..9085b22078b6010ce1e4137573b5b884f56f487b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/cache/cache.py
@@ -0,0 +1,207 @@
+import glob
+import json
+import os
+import shutil
+import time
+import warnings
+from pathlib import Path
+from typing import List, Optional, Tuple, Union
+
+import pyarrow as pa
+
+import datasets
+import datasets.config
+import datasets.data_files
+from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+def _get_modification_time(cached_directory_path):
+ return (Path(cached_directory_path)).stat().st_mtime
+
+
+def _find_hash_in_cache(
+ dataset_name: str,
+ config_name: Optional[str],
+ cache_dir: Optional[str],
+ config_kwargs: dict,
+ custom_features: Optional[datasets.Features],
+) -> Tuple[str, str, str]:
+ if config_name or config_kwargs or custom_features:
+ config_id = datasets.BuilderConfig(config_name or "default").create_config_id(
+ config_kwargs=config_kwargs, custom_features=custom_features
+ )
+ else:
+ config_id = None
+ cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE))
+ namespace_and_dataset_name = dataset_name.split("/")
+ namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
+ cached_relative_path = "___".join(namespace_and_dataset_name)
+ cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(
+ os.path.join(cached_datasets_directory_path_root, config_id or "*", "*", "*")
+ )
+ if os.path.isdir(cached_directory_path)
+ and (
+ config_kwargs
+ or custom_features
+ or json.loads(Path(cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
+ == Path(cached_directory_path).parts[-3] # no extra params => config_id == config_name
+ )
+ ]
+ if not cached_directory_paths:
+ cached_directory_paths = [
+ cached_directory_path
+ for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
+ if os.path.isdir(cached_directory_path)
+ ]
+ available_configs = sorted(
+ {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}
+ )
+ raise ValueError(
+ f"Couldn't find cache for {dataset_name}"
+ + (f" for config '{config_id}'" if config_id else "")
+ + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "")
+ )
+ # get most recent
+ cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1])
+ version, hash = cached_directory_path.parts[-2:]
+ other_configs = [
+ Path(_cached_directory_path).parts[-3]
+ for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash))
+ if os.path.isdir(_cached_directory_path)
+ and (
+ config_kwargs
+ or custom_features
+ or json.loads(Path(_cached_directory_path, "dataset_info.json").read_text(encoding="utf-8"))["config_name"]
+ == Path(_cached_directory_path).parts[-3] # no extra params => config_id == config_name
+ )
+ ]
+ if not config_id and len(other_configs) > 1:
+ raise ValueError(
+ f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}"
+ f"\nPlease specify which configuration to reload from the cache, e.g."
+ f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')"
+ )
+ config_name = cached_directory_path.parts[-3]
+ warning_msg = (
+ f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} "
+ f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})."
+ )
+ logger.warning(warning_msg)
+ return config_name, version, hash
+
+
+class Cache(datasets.ArrowBasedBuilder):
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ version: Optional[str] = "0.0.0",
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[datasets.DatasetInfo] = None,
+ features: Optional[datasets.Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ if repo_id is None and dataset_name is None:
+ raise ValueError("repo_id or dataset_name is required for the Cache dataset builder")
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ if hash == "auto" and version == "auto":
+ config_name, version, hash = _find_hash_in_cache(
+ dataset_name=repo_id or dataset_name,
+ config_name=config_name,
+ cache_dir=cache_dir,
+ config_kwargs=config_kwargs,
+ custom_features=features,
+ )
+ elif hash == "auto" or version == "auto":
+ raise NotImplementedError("Pass both hash='auto' and version='auto' instead")
+ super().__init__(
+ cache_dir=cache_dir,
+ dataset_name=dataset_name,
+ config_name=config_name,
+ version=version,
+ hash=hash,
+ base_path=base_path,
+ info=info,
+ token=token,
+ repo_id=repo_id,
+ storage_options=storage_options,
+ writer_batch_size=writer_batch_size,
+ )
+
+ def _info(self) -> datasets.DatasetInfo:
+ return datasets.DatasetInfo()
+
+ def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs):
+ if not os.path.exists(self.cache_dir):
+ raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}")
+ if output_dir is not None and output_dir != self.cache_dir:
+ shutil.copytree(self.cache_dir, output_dir)
+
+ def _split_generators(self, dl_manager):
+ # used to stream from cache
+ if isinstance(self.info.splits, datasets.SplitDict):
+ split_infos: List[datasets.SplitInfo] = list(self.info.splits.values())
+ else:
+ raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}")
+ return [
+ datasets.SplitGenerator(
+ name=split_info.name,
+ gen_kwargs={
+ "files": filenames_for_dataset_split(
+ self.cache_dir,
+ dataset_name=self.dataset_name,
+ split=split_info.name,
+ filetype_suffix="arrow",
+ shard_lengths=split_info.shard_lengths,
+ )
+ },
+ )
+ for split_info in split_infos
+ ]
+
+ def _generate_tables(self, files):
+ # used to stream from cache
+ for file_idx, file in enumerate(files):
+ with open(file, "rb") as f:
+ try:
+ for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
+ pa_table = pa.Table.from_batches([record_batch])
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield f"{file_idx}_{batch_idx}", pa_table
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
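The `Cache` builder added above is what `load_dataset` falls back to when it can only find an already-prepared Arrow cache. A hedged sketch of reloading the most recent cached configuration; the dataset name is a placeholder and must already exist in the local HF cache:

```py
from datasets.packaged_modules.cache.cache import Cache

# hash="auto" together with version="auto" triggers _find_hash_in_cache(), which
# picks the most recently modified cache directory for this dataset and logs
# which config/version/hash it resolved to.
builder = Cache(
    dataset_name="my_dataset",     # placeholder; must already be in the cache
    version="auto",
    hash="auto",
)
builder.download_and_prepare()     # no-op unless an output_dir is passed
ds = builder.as_dataset()          # materialize the cached splits, as load_dataset does
```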
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28f8af6fbba8534a7420180420e9c6bf6326cdc0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/__pycache__/csv.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py
new file mode 100644
index 0000000000000000000000000000000000000000..181f52799b4f9e998ec585581dedbbe59fd0611c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py
@@ -0,0 +1,203 @@
+import itertools
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, List, Optional, Union
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+import datasets.config
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+from datasets.utils.py_utils import Literal
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
+_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
+_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
+_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
+_PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ["verbose"]
+
+
+@dataclass
+class CsvConfig(datasets.BuilderConfig):
+ """BuilderConfig for CSV."""
+
+ sep: str = ","
+ delimiter: Optional[str] = None
+ header: Optional[Union[int, List[int], str]] = "infer"
+ names: Optional[List[str]] = None
+ column_names: Optional[List[str]] = None
+ index_col: Optional[Union[int, str, List[int], List[str]]] = None
+ usecols: Optional[Union[List[int], List[str]]] = None
+ prefix: Optional[str] = None
+ mangle_dupe_cols: bool = True
+ engine: Optional[Literal["c", "python", "pyarrow"]] = None
+ converters: Dict[Union[int, str], Callable[[Any], Any]] = None
+ true_values: Optional[list] = None
+ false_values: Optional[list] = None
+ skipinitialspace: bool = False
+ skiprows: Optional[Union[int, List[int]]] = None
+ nrows: Optional[int] = None
+ na_values: Optional[Union[str, List[str]]] = None
+ keep_default_na: bool = True
+ na_filter: bool = True
+ verbose: bool = False
+ skip_blank_lines: bool = True
+ thousands: Optional[str] = None
+ decimal: str = "."
+ lineterminator: Optional[str] = None
+ quotechar: str = '"'
+ quoting: int = 0
+ escapechar: Optional[str] = None
+ comment: Optional[str] = None
+ encoding: Optional[str] = None
+ dialect: Optional[str] = None
+ error_bad_lines: bool = True
+ warn_bad_lines: bool = True
+ skipfooter: int = 0
+ doublequote: bool = True
+ memory_map: bool = False
+ float_precision: Optional[str] = None
+ chunksize: int = 10_000
+ features: Optional[datasets.Features] = None
+ encoding_errors: Optional[str] = "strict"
+ on_bad_lines: Literal["error", "warn", "skip"] = "error"
+ date_format: Optional[str] = None
+
+ def __post_init__(self):
+ if self.delimiter is not None:
+ self.sep = self.delimiter
+ if self.column_names is not None:
+ self.names = self.column_names
+
+ @property
+ def pd_read_csv_kwargs(self):
+ pd_read_csv_kwargs = {
+ "sep": self.sep,
+ "header": self.header,
+ "names": self.names,
+ "index_col": self.index_col,
+ "usecols": self.usecols,
+ "prefix": self.prefix,
+ "mangle_dupe_cols": self.mangle_dupe_cols,
+ "engine": self.engine,
+ "converters": self.converters,
+ "true_values": self.true_values,
+ "false_values": self.false_values,
+ "skipinitialspace": self.skipinitialspace,
+ "skiprows": self.skiprows,
+ "nrows": self.nrows,
+ "na_values": self.na_values,
+ "keep_default_na": self.keep_default_na,
+ "na_filter": self.na_filter,
+ "verbose": self.verbose,
+ "skip_blank_lines": self.skip_blank_lines,
+ "thousands": self.thousands,
+ "decimal": self.decimal,
+ "lineterminator": self.lineterminator,
+ "quotechar": self.quotechar,
+ "quoting": self.quoting,
+ "escapechar": self.escapechar,
+ "comment": self.comment,
+ "encoding": self.encoding,
+ "dialect": self.dialect,
+ "error_bad_lines": self.error_bad_lines,
+ "warn_bad_lines": self.warn_bad_lines,
+ "skipfooter": self.skipfooter,
+ "doublequote": self.doublequote,
+ "memory_map": self.memory_map,
+ "float_precision": self.float_precision,
+ "chunksize": self.chunksize,
+ "encoding_errors": self.encoding_errors,
+ "on_bad_lines": self.on_bad_lines,
+ "date_format": self.date_format,
+ }
+
+ # some kwargs must not be passed if they don't have a default value
+ # some others are deprecated, so we also avoid passing them when they are left at their default value
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
+ if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
+
+ # Remove 1.3 new arguments
+ if datasets.config.PANDAS_VERSION.release < (1, 3):
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
+
+ # Remove 2.0 new arguments
+ if not (datasets.config.PANDAS_VERSION.major >= 2):
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
+
+ # Remove 2.2 deprecated arguments
+ if datasets.config.PANDAS_VERSION.release >= (2, 2):
+ for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS:
+ if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
+ del pd_read_csv_kwargs[pd_read_csv_parameter]
+
+ return pd_read_csv_kwargs
+
+
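+# A minimal usage sketch (illustrative only, not part of the module): it shows how a
+# CsvConfig is translated into pandas.read_csv keyword arguments; the file name
+# "data.csv" is a placeholder.
+#
+#     config = CsvConfig(delimiter=";", column_names=["col_a", "col_b"])
+#     reader = pd.read_csv("data.csv", iterator=True, **config.pd_read_csv_kwargs)
+#     for df in reader:
+#         ...  # one pandas.DataFrame per chunk of `config.chunksize` rows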
+class Csv(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = CsvConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ schema = self.config.features.arrow_schema
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+ # cheaper cast
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
+ else:
+ # more expensive cast; allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ schema = self.config.features.arrow_schema if self.config.features else None
+ # dtype allows reading an int column as str
+ dtype = (
+ {
+ name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
+ for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
+ }
+ if schema is not None
+ else None
+ )
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
+ try:
+ for batch_idx, df in enumerate(csv_file_reader):
+ pa_table = pa.Table.from_pandas(df)
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ except ValueError as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7fb7f77adb0ecbc2bfc21d6aff9af34c3133cab
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..867399267f003edde4db2d4c582e5d0d0f876aed
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..6076af6b37a45a0eee11b35310552ba720136787
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/json/json.py
@@ -0,0 +1,180 @@
+import io
+import itertools
+import json
+from dataclasses import dataclass
+from typing import Optional
+
+import pyarrow as pa
+import pyarrow.json as paj
+
+import datasets
+from datasets.table import table_cast
+from datasets.utils.file_utils import readline
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class JsonConfig(datasets.BuilderConfig):
+ """BuilderConfig for JSON."""
+
+ features: Optional[datasets.Features] = None
+ encoding: str = "utf-8"
+ encoding_errors: Optional[str] = None
+ field: Optional[str] = None
+ use_threads: bool = True # deprecated
+ block_size: Optional[int] = None # deprecated
+ chunksize: int = 10 << 20 # 10MB
+ newlines_in_values: Optional[bool] = None
+
+
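+# A minimal usage sketch (illustrative only; the file names are placeholders):
+#
+#     from datasets import load_dataset
+#     ds = load_dataset("json", data_files="data.jsonl")                  # one JSON object per line
+#     ds = load_dataset("json", data_files="data.json", field="records")  # records nested under a field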
+class Json(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = JsonConfig
+
+ def _info(self):
+ if self.config.block_size is not None:
+ logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
+ self.config.chunksize = self.config.block_size
+ if self.config.use_threads is not True:
+ logger.warning(
+ "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
+ )
+ if self.config.newlines_in_values is not None:
+ raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ dl_manager.download_config.extract_on_the_fly = True
+ data_files = dl_manager.download_and_extract(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ files = data_files
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
+ splits = []
+ for split_name, files in data_files.items():
+ if isinstance(files, str):
+ files = [files]
+ files = [dl_manager.iter_files(file) for file in files]
+ splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
+ return splits
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ # adding missing columns
+ for column_name in set(self.config.features) - set(pa_table.column_names):
+ type = self.config.features.arrow_schema.field(column_name).type
+ pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
+ # more expensive cast to support nested structures with keys in a different order
+ # allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, self.config.features.arrow_schema)
+ return pa_table
+
+ def _generate_tables(self, files):
+ for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
+ # If the file is one json object and if we need to look at the list of items in one specific field
+ if self.config.field is not None:
+ with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
+ dataset = json.load(f)
+
+ # We keep only the field we are interested in
+ dataset = dataset[self.config.field]
+
+ # We accept two formats: a list of dicts or a dict of lists
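+ # e.g. (with field="data"; illustrative shapes):
+ #     {"data": [{"a": 1}, {"a": 2}]}   -> list of dicts
+ #     {"data": {"a": [1, 2]}}          -> dict of lists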
+ if isinstance(dataset, (list, tuple)):
+ keys = set().union(*[row.keys() for row in dataset])
+ mapping = {col: [row.get(col) for row in dataset] for col in keys}
+ else:
+ mapping = dataset
+ pa_table = pa.Table.from_pydict(mapping)
+ yield file_idx, self._cast_table(pa_table)
+
+ # If the file has one json object per line
+ else:
+ with open(file, "rb") as f:
+ batch_idx = 0
+ # Use block_size equal to the chunk size divided by 32 to leverage multithreading
+ # Set a default minimum value of 16kB if the chunk size is really small
+ block_size = max(self.config.chunksize // 32, 16 << 10)
+ encoding_errors = (
+ self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
+ )
+ while True:
+ batch = f.read(self.config.chunksize)
+ if not batch:
+ break
+ # Finish current line
+ try:
+ batch += f.readline()
+ except (AttributeError, io.UnsupportedOperation):
+ batch += readline(f)
+ # PyArrow only accepts utf-8 encoded bytes
+ if self.config.encoding != "utf-8":
+ batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
+ try:
+ while True:
+ try:
+ pa_table = paj.read_json(
+ io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
+ )
+ break
+ except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
+ if (
+ isinstance(e, pa.ArrowInvalid)
+ and "straddling" not in str(e)
+ or block_size > len(batch)
+ ):
+ raise
+ else:
+ # Increase the block size in case it was too small.
+ # The block size will be reset for the next file.
+ logger.debug(
+ f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
+ )
+ block_size *= 2
+ except pa.ArrowInvalid as e:
+ try:
+ with open(
+ file, encoding=self.config.encoding, errors=self.config.encoding_errors
+ ) as f:
+ dataset = json.load(f)
+ except json.JSONDecodeError:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise e
+ # If possible, parse the file as a list of json objects/strings and exit the loop
+ if isinstance(dataset, list): # list is the only sequence type supported in JSON
+ try:
+ if dataset and isinstance(dataset[0], str):
+ pa_table_names = (
+ list(self.config.features)
+ if self.config.features is not None
+ else ["text"]
+ )
+ pa_table = pa.Table.from_arrays([pa.array(dataset)], names=pa_table_names)
+ else:
+ keys = set().union(*[row.keys() for row in dataset])
+ mapping = {col: [row.get(col) for row in dataset] for col in keys}
+ pa_table = pa.Table.from_pydict(mapping)
+ except (pa.ArrowInvalid, AttributeError) as e:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
+ yield file_idx, self._cast_table(pa_table)
+ break
+ else:
+ logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
+ raise ValueError(
+ f"Not able to read records in the JSON file at {file}. "
+ f"You should probably indicate the field of the JSON file containing your records. "
+ f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
+ f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
+ ) from None
+ # Uncomment for debugging (will print the Arrow table size and elements)
+ # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
+ # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
+ yield (file_idx, batch_idx), self._cast_table(pa_table)
+ batch_idx += 1
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7bad9756a980726e06b03312b936c7c25f6974a4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0241ac26ac9bc7cd573badc8875f504ac5239f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py
new file mode 100644
index 0000000000000000000000000000000000000000..fee5f7c4c6123985beb2026ba4a01f80d7625205
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/spark/spark.py
@@ -0,0 +1,349 @@
+import os
+import posixpath
+import shutil
+import uuid
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+
+import datasets
+from datasets.arrow_writer import ArrowWriter, ParquetWriter
+from datasets.config import MAX_SHARD_SIZE
+from datasets.filesystems import (
+ is_remote_filesystem,
+ rename,
+)
+from datasets.iterable_dataset import _BaseExamplesIterable
+from datasets.utils.py_utils import convert_file_size_to_int
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+if TYPE_CHECKING:
+ import pyspark
+
+
+@dataclass
+class SparkConfig(datasets.BuilderConfig):
+ """BuilderConfig for Spark."""
+
+ features: Optional[datasets.Features] = None
+
+
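+# A minimal usage sketch (illustrative only; assumes an active SparkSession and a
+# pyspark DataFrame `df`):
+#
+#     from datasets import Dataset
+#     ds = Dataset.from_spark(df)
+#     # on a multi-node cluster, pass a cache_dir that every worker can reach, e.g.
+#     # ds = Dataset.from_spark(df, cache_dir="/mnt/shared_cache")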
+def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]):
+ df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}")
+ for partition_id in new_partition_order[1:]:
+ partition_df = df.select("*").where(f"part_id = {partition_id}")
+ df_combined = df_combined.union(partition_df)
+ return df_combined
+
+
+def _generate_iterable_examples(
+ df: "pyspark.sql.DataFrame",
+ partition_order: List[int],
+):
+ import pyspark
+
+ def generate_fn():
+ df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
+ partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order)
+ row_id = 0
+ # pipeline next partition in parallel to hide latency
+ rows = partition_df.toLocalIterator(prefetchPartitions=True)
+ curr_partition = -1
+ for row in rows:
+ row_as_dict = row.asDict()
+ part_id = row_as_dict["part_id"]
+ row_as_dict.pop("part_id")
+ if curr_partition != part_id:
+ curr_partition = part_id
+ row_id = 0
+ yield f"{part_id}_{row_id}", row_as_dict
+ row_id += 1
+
+ return generate_fn
+
+
+class SparkExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ df: "pyspark.sql.DataFrame",
+ partition_order=None,
+ ):
+ self.df = df
+ self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
+ self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
+
+ def __iter__(self):
+ yield from self.generate_examples_fn()
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
+ partition_order = list(range(self.df.rdd.getNumPartitions()))
+ generator.shuffle(partition_order)
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
+ partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
+ return SparkExamplesIterable(self.df, partition_order=partition_order)
+
+ @property
+ def n_shards(self) -> int:
+ return len(self.partition_order)
+
+
+class Spark(datasets.DatasetBuilder):
+ BUILDER_CONFIG_CLASS = SparkConfig
+
+ def __init__(
+ self,
+ df: "pyspark.sql.DataFrame",
+ cache_dir: str = None,
+ working_dir: str = None,
+ **config_kwargs,
+ ):
+ import pyspark
+
+ self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
+ self.df = df
+ self._working_dir = working_dir
+
+ super().__init__(
+ cache_dir=cache_dir,
+ config_name=str(self.df.semanticHash()),
+ **config_kwargs,
+ )
+
+ def _validate_cache_dir(self):
+ # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling
+ # error due to pickling the SparkContext.
+ cache_dir = self._cache_dir
+
+ # Returns the path of the created file.
+ def create_cache_and_write_probe(context):
+ # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
+ # already exist.
+ os.makedirs(cache_dir, exist_ok=True)
+ probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
+ # Opening the file in append mode creates it if it does not exist, and otherwise
+ # leaves the existing contents unchanged.
+ open(probe_file, "a")
+ return [probe_file]
+
+ if self._spark.conf.get("spark.master", "").startswith("local"):
+ return
+
+ # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
+ # accessible to the driver.
+ # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
+ if self._cache_dir:
+ probe = (
+ self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
+ )
+ if os.path.isfile(probe[0]):
+ return
+
+ raise ValueError(
+ "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
+ )
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
+
+ def _repartition_df_if_needed(self, max_shard_size):
+ import pyspark
+
+ def get_arrow_batch_size(it):
+ for batch in it:
+ yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})
+
+ df_num_rows = self.df.count()
+ sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
+ # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
+ approx_bytes_per_row = (
+ self.df.limit(sample_num_rows)
+ .repartition(1)
+ .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
+ .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
+ .collect()[0]
+ .sample_bytes
+ / sample_num_rows
+ )
+ approx_total_size = approx_bytes_per_row * df_num_rows
+ if approx_total_size > max_shard_size:
+ # Make sure there is at least one row per partition.
+ new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
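+ # e.g. approx_total_size ~= 10_000 MB with max_shard_size = 500 MB gives about
+ # 20 partitions, provided the DataFrame has at least 20 rows (illustrative numbers)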
+ self.df = self.df.repartition(new_num_partitions)
+
+ def _prepare_split_single(
+ self,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ import pyspark
+
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
+ embed_local_files = file_format == "parquet"
+
+ # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
+ # pickling the SparkContext.
+ features = self.config.features
+ writer_batch_size = self._writer_batch_size
+ storage_options = self._fs.storage_options
+
+ def write_arrow(it):
+ # Within the same SparkContext, no two task attempts will share the same attempt ID.
+ task_id = pyspark.TaskContext().taskAttemptId()
+ first_batch = next(it, None)
+ if first_batch is None:
+ # Some partitions might not receive any data.
+ return pa.RecordBatch.from_arrays(
+ [[task_id], [0], [0]],
+ names=["task_id", "num_examples", "num_bytes"],
+ )
+ shard_id = 0
+ writer = writer_class(
+ features=features,
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
+ writer_batch_size=writer_batch_size,
+ storage_options=storage_options,
+ embed_local_files=embed_local_files,
+ )
+ table = pa.Table.from_batches([first_batch])
+ writer.write_table(table)
+ for batch in it:
+ if max_shard_size is not None and writer._num_bytes >= max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ yield pa.RecordBatch.from_arrays(
+ [[task_id], [num_examples], [num_bytes]],
+ names=["task_id", "num_examples", "num_bytes"],
+ )
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
+ writer_batch_size=writer_batch_size,
+ storage_options=storage_options,
+ embed_local_files=embed_local_files,
+ )
+ table = pa.Table.from_batches([batch])
+ writer.write_table(table)
+
+ if writer._num_bytes > 0:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ yield pa.RecordBatch.from_arrays(
+ [[task_id], [num_examples], [num_bytes]],
+ names=["task_id", "num_examples", "num_bytes"],
+ )
+
+ if working_fpath != fpath:
+ for file in os.listdir(os.path.dirname(working_fpath)):
+ dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
+ shutil.move(os.path.join(os.path.dirname(working_fpath), file), dest)
+
+ stats = (
+ self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
+ .groupBy("task_id")
+ .agg(
+ pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
+ pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
+ pyspark.sql.functions.count("num_bytes").alias("num_shards"),
+ pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
+ )
+ .collect()
+ )
+ for row in stats:
+ yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
+
+ def _prepare_split(
+ self,
+ split_generator: "datasets.SplitGenerator",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ self._validate_cache_dir()
+
+ max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
+ self._repartition_df_if_needed(max_shard_size)
+ is_local = not is_remote_filesystem(self._fs)
+ path_join = os.path.join if is_local else posixpath.join
+
+ SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
+ fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = path_join(self._output_dir, fname)
+
+ total_num_examples = 0
+ total_num_bytes = 0
+ total_shards = 0
+ task_id_and_num_shards = []
+ all_shard_lengths = []
+
+ for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
+ (
+ num_examples,
+ num_bytes,
+ num_shards,
+ shard_lengths,
+ ) = content
+ if num_bytes > 0:
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ total_shards += num_shards
+ task_id_and_num_shards.append((task_id, num_shards))
+ all_shard_lengths.extend(shard_lengths)
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ split_generator.split_info.shard_lengths = all_shard_lengths
+
+ # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
+ # pickling error due to pickling the SparkContext.
+ fs = self._fs
+
+ # use the -SSSSS-of-NNNNN pattern
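+ # e.g. a worker-written shard "...-00007-00002-of-NNNNN.arrow" (task 7, shard 2)
+ # becomes "...-00014-of-00020.arrow" when global_shard_id=14 and total_shards=20
+ # (illustrative numbers)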
+ def _rename_shard(
+ task_id: int,
+ shard_id: int,
+ global_shard_id: int,
+ ):
+ rename(
+ fs,
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
+ fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ args = []
+ global_shard_id = 0
+ for i in range(len(task_id_and_num_shards)):
+ task_id, num_shards = task_id_and_num_shards[i]
+ for shard_id in range(num_shards):
+ args.append([task_id, shard_id, global_shard_id])
+ global_shard_id += 1
+ self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
+ else:
+ # don't use any pattern
+ shard_id = 0
+ task_id = task_id_and_num_shards[0][0]
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ def _get_examples_iterable_for_split(
+ self,
+ split_generator: "datasets.SplitGenerator",
+ ) -> SparkExamplesIterable:
+ return SparkExamplesIterable(self.df)
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6c7af041cf9299b5a843b0581a1ec3c53abb39e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c3c445c2c545d0944581fbbc7c1e5d06cc61eb5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
@@ -0,0 +1,118 @@
+import sys
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+import datasets.config
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import sqlalchemy
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+@dataclass
+class SqlConfig(datasets.BuilderConfig):
+ """BuilderConfig for SQL."""
+
+ sql: Union[str, "sqlalchemy.sql.Selectable"] = None
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
+ index_col: Optional[Union[str, List[str]]] = None
+ coerce_float: bool = True
+ params: Optional[Union[List, Tuple, Dict]] = None
+ parse_dates: Optional[Union[List, Dict]] = None
+ columns: Optional[List[str]] = None
+ chunksize: Optional[int] = 10_000
+ features: Optional[datasets.Features] = None
+
+ def __post_init__(self):
+ if self.sql is None:
+ raise ValueError("sql must be specified")
+ if self.con is None:
+ raise ValueError("con must be specified")
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[datasets.Features] = None,
+ ) -> str:
+ config_kwargs = config_kwargs.copy()
+ # We need to stringify the Selectable object to make its hash deterministic
+
+ # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
+ sql = config_kwargs["sql"]
+ if not isinstance(sql, str):
+ if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
+ import sqlalchemy
+
+ if isinstance(sql, sqlalchemy.sql.Selectable):
+ engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
+ sql_str = str(sql.compile(dialect=engine.dialect))
+ config_kwargs["sql"] = sql_str
+ else:
+ raise TypeError(
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+ )
+ else:
+ raise TypeError(
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
+ )
+ con = config_kwargs["con"]
+ if not isinstance(con, str):
+ config_kwargs["con"] = id(con)
+ logger.info(
+ f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
+ )
+
+ return super().create_config_id(config_kwargs, custom_features=custom_features)
+
+ @property
+ def pd_read_sql_kwargs(self):
+ pd_read_sql_kwargs = {
+ "index_col": self.index_col,
+ "columns": self.columns,
+ "params": self.params,
+ "coerce_float": self.coerce_float,
+ "parse_dates": self.parse_dates,
+ }
+ return pd_read_sql_kwargs
+
+
+class Sql(datasets.ArrowBasedBuilder):
+ BUILDER_CONFIG_CLASS = SqlConfig
+
+ def _info(self):
+ return datasets.DatasetInfo(features=self.config.features)
+
+ def _split_generators(self, dl_manager):
+ return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]
+
+ def _cast_table(self, pa_table: pa.Table) -> pa.Table:
+ if self.config.features is not None:
+ schema = self.config.features.arrow_schema
+ if all(not require_storage_cast(feature) for feature in self.config.features.values()):
+ # cheaper cast
+ pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
+ else:
+ # more expensive cast; allows str <-> int/float or str to Audio for example
+ pa_table = table_cast(pa_table, schema)
+ return pa_table
+
+ def _generate_tables(self):
+ chunksize = self.config.chunksize
+ sql_reader = pd.read_sql(
+ self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
+ )
+ sql_reader = [sql_reader] if chunksize is None else sql_reader
+ for chunk_idx, df in enumerate(sql_reader):
+ pa_table = pa.Table.from_pandas(df)
+ yield chunk_idx, self._cast_table(pa_table)
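+
+
+# A minimal usage sketch (illustrative only; the query and the sqlite URI are placeholders):
+#
+#     from datasets import Dataset
+#     ds = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///my.db")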
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..41fca68bd71e9cbd51d5b8f3af10c0ffa2816fab
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8ff62a21260cc81df18861dde653d72f98fe8c9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ab5669f427036be52a0a7451f5a7bcb7632b802
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd8c054842e090dc09bdf3d2fee59241a1a928c5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py
@@ -0,0 +1,285 @@
+#
+# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved.
+# This file comes from the WebDataset library.
+# See the LICENSE file for licensing terms (BSD-style).
+#
+
+"""
+Binary tensor encodings for PyTorch and NumPy.
+
+This defines efficient binary encodings for tensors. The format is 8 byte
+aligned and can be used directly for computations when transmitted, say,
+via RDMA. The format is supported by WebDataset with the `.ten` filename
+extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used
+for fast tensor storage with LMDB and in disk files (which can be memory
+mapped).
+
+Data is encoded as a series of chunks:
+
+- magic number (int64)
+- length in bytes (int64)
+- bytes (multiple of 64 bytes long)
+
+Arrays are a header chunk followed by a data chunk.
+Header chunks have the following structure:
+
+- dtype (int64)
+- 8 byte array name
+- ndim (int64)
+- dim[0]
+- dim[1]
+- ...
+"""
+
+import struct
+import sys
+
+import numpy as np
+
+
+def bytelen(a):
+ """Determine the length of a in bytes."""
+ if hasattr(a, "nbytes"):
+ return a.nbytes
+ elif isinstance(a, (bytearray, bytes)):
+ return len(a)
+ else:
+ raise ValueError(a, "cannot determine nbytes")
+
+
+def bytedata(a):
+ """Return a the raw data corresponding to a."""
+ if isinstance(a, (bytearray, bytes, memoryview)):
+ return a
+ elif hasattr(a, "data"):
+ return a.data
+ else:
+ raise ValueError(a, "cannot return bytedata")
+
+
+# tables for converting between long/short NumPy dtypes
+
+long_to_short = """
+float16 f2
+float32 f4
+float64 f8
+int8 i1
+int16 i2
+int32 i4
+int64 i8
+uint8 u1
+uint16 u2
+uint32 u4
+uint64 u8
+""".strip()
+long_to_short = [x.split() for x in long_to_short.split("\n")]
+long_to_short = {x[0]: x[1] for x in long_to_short}
+short_to_long = {v: k for k, v in long_to_short.items()}
+
+
+def check_acceptable_input_type(data, allow64):
+ """Check that the data has an acceptable type for tensor encoding.
+
+ :param data: array
+ :param allow64: allow 64 bit types
+ """
+ for a in data:
+ if a.dtype.name not in long_to_short:
+ raise ValueError("unsupported dataypte")
+ if not allow64 and a.dtype.name in ["float64", "int64", "uint64"]:
+ raise ValueError("64 bit datatypes not allowed unless explicitly enabled")
+
+
+def str64(s):
+ """Convert a string to an int64."""
+ s = s + "\0" * (8 - len(s))
+ s = s.encode("ascii")
+ return struct.unpack("@q", s)[0]
+
+
+def unstr64(i):
+ """Convert an int64 to a string."""
+ b = struct.pack("@q", i)
+ return b.decode("ascii").strip("\0")
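+# e.g. unstr64(str64("f4")) == "f4" -- round trip for short dtype tags and info strings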
+
+
+def check_infos(data, infos, required_infos=None):
+ """Verify the info strings."""
+ if required_infos is False or required_infos is None:
+ return data
+ if required_infos is True:
+ return data, infos
+ if not isinstance(required_infos, (tuple, list)):
+ raise ValueError("required_infos must be tuple or list")
+ for required, actual in zip(required_infos, infos):
+ raise ValueError(f"actual info {actual} doesn't match required info {required}")
+ return data
+
+
+def encode_header(a, info=""):
+ """Encode an array header as a byte array."""
+ if a.ndim >= 10:
+ raise ValueError("too many dimensions")
+ if a.nbytes != np.prod(a.shape) * a.itemsize:
+ raise ValueError("mismatch between size and shape")
+ if a.dtype.name not in long_to_short:
+ raise ValueError("unsupported array type")
+ header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
+ return bytedata(np.array(header, dtype="i8"))
+
+
+def decode_header(h):
+ """Decode a byte array into an array header."""
+ h = np.frombuffer(h, dtype="i8")
+ if unstr64(h[0]) not in short_to_long:
+ raise ValueError("unsupported array type")
+ dtype = np.dtype(short_to_long[unstr64(h[0])])
+ info = unstr64(h[1])
+ rank = int(h[2])
+ shape = tuple(h[3 : 3 + rank])
+ return shape, dtype, info
+
+
+def encode_list(l, infos=None): # noqa: E741
+ """Given a list of arrays, encode them into a list of byte arrays."""
+ if infos is None:
+ infos = [""]
+ else:
+ if len(l) != len(infos):
+ raise ValueError(f"length of list {l} must muatch length of infos {infos}")
+ result = []
+ for i, a in enumerate(l):
+ header = encode_header(a, infos[i % len(infos)])
+ result += [header, bytedata(a)]
+ return result
+
+
+def decode_list(l, infos=False): # noqa: E741
+ """Given a list of byte arrays, decode them into arrays."""
+ result = []
+ infos0 = []
+ for header, data in zip(l[::2], l[1::2]):
+ shape, dtype, info = decode_header(header)
+ a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape)
+ result += [a]
+ infos0 += [info]
+ return check_infos(result, infos0, infos)
+
+
+magic_str = "~TenBin~"
+magic = str64(magic_str)
+magic_bytes = unstr64(magic).encode("ascii")
+
+
+def roundup(n, k=64):
+ """Round up to the next multiple of 64."""
+ return k * ((n + k - 1) // k)
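+# e.g. roundup(100) == 128 and roundup(64) == 64 (chunks are padded to 64-byte multiples)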
+
+
+def encode_chunks(l): # noqa: E741
+ """Encode a list of chunks into a single byte array, with lengths and magics.."""
+ size = sum(16 + roundup(b.nbytes) for b in l)
+ result = bytearray(size)
+ offset = 0
+ for b in l:
+ result[offset : offset + 8] = magic_bytes
+ offset += 8
+ result[offset : offset + 8] = struct.pack("@q", b.nbytes)
+ offset += 8
+ result[offset : offset + bytelen(b)] = b
+ offset += roundup(bytelen(b))
+ return result
+
+
+def decode_chunks(buf):
+ """Decode a byte array into a list of chunks."""
+ result = []
+ offset = 0
+ total = bytelen(buf)
+ while offset < total:
+ if magic_bytes != buf[offset : offset + 8]:
+ raise ValueError("magic bytes mismatch")
+ offset += 8
+ nbytes = struct.unpack("@q", buf[offset : offset + 8])[0]
+ offset += 8
+ b = buf[offset : offset + nbytes]
+ offset += roundup(nbytes)
+ result.append(b)
+ return result
+
+
+def encode_buffer(l, infos=None): # noqa: E741
+ """Encode a list of arrays into a single byte array."""
+ if not isinstance(l, list):
+ raise ValueError("requires list")
+ return encode_chunks(encode_list(l, infos=infos))
+
+
+def decode_buffer(buf, infos=False):
+ """Decode a byte array into a list of arrays."""
+ return decode_list(decode_chunks(buf), infos=infos)
+
+
+def write_chunk(stream, buf):
+ """Write a byte chunk to the stream with magics, length, and padding."""
+ nbytes = bytelen(buf)
+ stream.write(magic_bytes)
+ stream.write(struct.pack("@q", nbytes))
+ stream.write(bytedata(buf))
+ padding = roundup(nbytes) - nbytes
+ if padding > 0:
+ stream.write(b"\0" * padding)
+
+
+def read_chunk(stream):
+ """Read a byte chunk from a stream with magics, length, and padding."""
+ magic = stream.read(8)
+ if magic == b"":
+ return None
+ if magic != magic_bytes:
+ raise ValueError("magic number does not match")
+ nbytes = stream.read(8)
+ nbytes = struct.unpack("@q", nbytes)[0]
+ if nbytes < 0:
+ raise ValueError("negative nbytes")
+ data = stream.read(nbytes)
+ padding = roundup(nbytes) - nbytes
+ if padding > 0:
+ stream.read(padding)
+ return data
+
+
+def write(stream, l, infos=None): # noqa: E741
+ """Write a list of arrays to a stream, with magics, length, and padding."""
+ for chunk in encode_list(l, infos=infos):
+ write_chunk(stream, chunk)
+
+
+def read(stream, n=sys.maxsize, infos=False):
+ """Read a list of arrays from a stream, with magics, length, and padding."""
+ chunks = []
+ for _ in range(n):
+ header = read_chunk(stream)
+ if header is None:
+ break
+ data = read_chunk(stream)
+ if data is None:
+ raise ValueError("premature EOF")
+ chunks += [header, data]
+ return decode_list(chunks, infos=infos)
+
+
+def save(fname, *args, infos=None, nocheck=False):
+ """Save a list of arrays to a file, with magics, length, and padding."""
+ if not nocheck and not fname.endswith(".ten"):
+ raise ValueError("file name should end in .ten")
+ with open(fname, "wb") as stream:
+ write(stream, args, infos=infos)
+
+
+def load(fname, infos=False, nocheck=False):
+ """Read a list of arrays from a file, with magics, length, and padding."""
+ if not nocheck and not fname.endswith(".ten"):
+ raise ValueError("file name should end in .ten")
+ with open(fname, "rb") as stream:
+ return read(stream, infos=infos)
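+
+
+# A minimal round-trip sketch (illustrative only, not part of the module):
+#
+#     a = np.arange(12, dtype="int32").reshape(3, 4)
+#     buf = encode_buffer([a])          # header chunk + data chunk, 64-byte aligned
+#     [restored] = decode_buffer(buf)
+#     assert (restored == a).all()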
diff --git a/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ac1e86fc417863ba9b5fd8fca97581c63d48768
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py
@@ -0,0 +1,299 @@
+import io
+import json
+from itertools import islice
+from typing import Any, Callable, Dict, List
+
+import numpy as np
+import pyarrow as pa
+
+import datasets
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class WebDataset(datasets.GeneratorBasedBuilder):
+ DEFAULT_WRITER_BATCH_SIZE = 100
+ IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script
+ AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script
+ DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script
+ NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5
+
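+ # WebDataset convention (illustrative example): tar members named "00001.jpg",
+ # "00001.json", "00002.jpg", "00002.json" are grouped by the prefix before the
+ # first dot, so each yielded example looks roughly like
+ # {"__key__": "00001", "__url__": tar_path, "jpg": <bytes>, "json": {...}}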
+ @classmethod
+ def _get_pipeline_from_tar(cls, tar_path, tar_iterator):
+ current_example = {}
+ for filename, f in tar_iterator:
+ if "." in filename:
+ example_key, field_name = filename.split(".", 1)
+ if current_example and current_example["__key__"] != example_key:
+ yield current_example
+ current_example = {}
+ current_example["__key__"] = example_key
+ current_example["__url__"] = tar_path
+ current_example[field_name.lower()] = f.read()
+ if field_name in cls.DECODERS:
+ current_example[field_name] = cls.DECODERS[field_name](current_example[field_name])
+ if current_example:
+ yield current_example
+
+ def _info(self) -> datasets.DatasetInfo:
+ return datasets.DatasetInfo()
+
+ def _split_generators(self, dl_manager):
+ """We handle string, list and dicts in datafiles"""
+ # Download the data files
+ if not self.config.data_files:
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
+ data_files = dl_manager.download(self.config.data_files)
+ if isinstance(data_files, (str, list, tuple)):
+ tar_paths = data_files
+ if isinstance(tar_paths, str):
+ tar_paths = [tar_paths]
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+ splits = [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+ )
+ ]
+ else:
+ splits = []
+ for split_name, tar_paths in data_files.items():
+ if isinstance(tar_paths, str):
+ tar_paths = [tar_paths]
+ tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths]
+ splits.append(
+ datasets.SplitGenerator(
+ name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators}
+ )
+ )
+ if not self.info.features:
+ # Get one example to get the feature types
+ pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0])
+ first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE))
+ if any(example.keys() != first_examples[0].keys() for example in first_examples):
+ raise ValueError(
+ "The TAR archives of the dataset should be in WebDataset format, "
+ "but the files in the archive don't share the same prefix or the same types."
+ )
+ pa_tables = [pa.Table.from_pylist([example]) for example in first_examples]
+ if datasets.config.PYARROW_VERSION.major < 14:
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema
+ else:
+ inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema
+ features = datasets.Features.from_arrow_schema(inferred_arrow_schema)
+
+ # Set Image types
+ for field_name in first_examples[0]:
+ extension = field_name.rsplit(".", 1)[-1]
+ if extension in self.IMAGE_EXTENSIONS:
+ features[field_name] = datasets.Image()
+ # Set Audio types
+ for field_name in first_examples[0]:
+ extension = field_name.rsplit(".", 1)[-1]
+ if extension in self.AUDIO_EXTENSIONS:
+ features[field_name] = datasets.Audio()
+ self.info.features = features
+
+ return splits
+
+ def _generate_examples(self, tar_paths, tar_iterators):
+ image_field_names = [
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image)
+ ]
+ audio_field_names = [
+ field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio)
+ ]
+ for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)):
+ for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)):
+ for field_name in image_field_names + audio_field_names:
+ example[field_name] = {"path": example["__key__"] + "." + field_name, "bytes": example[field_name]}
+ yield f"{tar_idx}_{example_idx}", example
+
+
+# Obtained with:
+# ```
+# import PIL.Image
+# IMAGE_EXTENSIONS = []
+# PIL.Image.init()
+# for ext, format in PIL.Image.EXTENSION.items():
+# if format in PIL.Image.OPEN:
+# IMAGE_EXTENSIONS.append(ext[1:])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+IMAGE_EXTENSIONS = [
+ "blp",
+ "bmp",
+ "dib",
+ "bufr",
+ "cur",
+ "pcx",
+ "dcx",
+ "dds",
+ "ps",
+ "eps",
+ "fit",
+ "fits",
+ "fli",
+ "flc",
+ "ftc",
+ "ftu",
+ "gbr",
+ "gif",
+ "grib",
+ "h5",
+ "hdf",
+ "png",
+ "apng",
+ "jp2",
+ "j2k",
+ "jpc",
+ "jpf",
+ "jpx",
+ "j2c",
+ "icns",
+ "ico",
+ "im",
+ "iim",
+ "tif",
+ "tiff",
+ "jfif",
+ "jpe",
+ "jpg",
+ "jpeg",
+ "mpg",
+ "mpeg",
+ "msp",
+ "pcd",
+ "pxr",
+ "pbm",
+ "pgm",
+ "ppm",
+ "pnm",
+ "psd",
+ "bw",
+ "rgb",
+ "rgba",
+ "sgi",
+ "ras",
+ "tga",
+ "icb",
+ "vda",
+ "vst",
+ "webp",
+ "wmf",
+ "emf",
+ "xbm",
+ "xpm",
+]
+WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS
+
+
+# Obtained with:
+# ```
+# import soundfile as sf
+#
+# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
+#
+# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
+# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
+# ```
+# We intentionally do not run this code on launch because:
+# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
+# (2) To ensure the list of supported extensions is deterministic
+AUDIO_EXTENSIONS = [
+ "aiff",
+ "au",
+ "avr",
+ "caf",
+ "flac",
+ "htk",
+ "svx",
+ "mat4",
+ "mat5",
+ "mpc2k",
+ "ogg",
+ "paf",
+ "pvf",
+ "raw",
+ "rf64",
+ "sd2",
+ "sds",
+ "ircam",
+ "voc",
+ "w64",
+ "wav",
+ "nist",
+ "wavex",
+ "wve",
+ "xi",
+ "mp3",
+ "opus",
+]
+WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS
+
+
+def text_loads(data: bytes):
+ return data.decode("utf-8")
+
+
+def tenbin_loads(data: bytes):
+ from . import _tenbin
+
+ return _tenbin.decode_buffer(data)
+
+
+def msgpack_loads(data: bytes):
+ import msgpack
+
+ return msgpack.unpackb(data)
+
+
+def npy_loads(data: bytes):
+ import numpy.lib.format
+
+ stream = io.BytesIO(data)
+ return numpy.lib.format.read_array(stream, allow_pickle=False)
+
+
+def npz_loads(data: bytes):
+ return np.load(io.BytesIO(data), allow_pickle=False)
+
+
+def cbor_loads(data: bytes):
+ import cbor
+
+ return cbor.loads(data)
+
+
+# Obtained by checking `decoders` in `webdataset.autodecode`
+# and removing unsafe extension decoders.
+# Removed Pickle decoders:
+# - "pyd": lambda data: pickle.loads(data)
+# - "pickle": lambda data: pickle.loads(data)
+# Removed Torch decoders:
+# - "pth": lambda data: torch_loads(data)
+# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False):
+# - "npy": npy_loads,
+# - "npz": lambda data: np.load(io.BytesIO(data)),
+DECODERS = {
+ "txt": text_loads,
+ "text": text_loads,
+ "transcript": text_loads,
+ "cls": int,
+ "cls2": int,
+ "index": int,
+ "inx": int,
+ "id": int,
+ "json": json.loads,
+ "jsn": json.loads,
+ "ten": tenbin_loads,
+ "tb": tenbin_loads,
+ "mp": msgpack_loads,
+ "msg": msgpack_loads,
+ "npy": npy_loads,
+ "npz": npz_loads,
+ "cbor": cbor_loads,
+}
+WebDataset.DECODERS = DECODERS
diff --git a/venv/lib/python3.10/site-packages/datasets/parallel/__init__.py b/venv/lib/python3.10/site-packages/datasets/parallel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d83093588514bec18b3536f4287a699939af499e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/parallel/__init__.py
@@ -0,0 +1 @@
+from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
diff --git a/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..441b9e31f5d809139f2a1aa623832e4ddee660ef
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62ebde8c72ec2733734ed0fdefa75bfacd69afc6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/parallel/parallel.py b/venv/lib/python3.10/site-packages/datasets/parallel/parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cad2c48ba2b0a6e71c62babd4eb052a3dd1dfd4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/parallel/parallel.py
@@ -0,0 +1,120 @@
+import contextlib
+from multiprocessing import Pool, RLock
+
+from tqdm.auto import tqdm
+
+from ..utils import experimental, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ParallelBackendConfig:
+ backend_name = None
+
+
+@experimental
+def parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func):
+ """
+ **Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either
+ multiprocessing.Pool or joblib for parallelization.
+
+ Args:
+ function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
+ iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
+ num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
+ types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
+ disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
+ desc (`str`): Prefix for the tqdm progressbar.
+ single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
+ Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
+ element of `iterable`, and `rank` is used for progress bar.
+ """
+ if ParallelBackendConfig.backend_name is None:
+ return _map_with_multiprocessing_pool(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+ )
+
+ return _map_with_joblib(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+ )
+
+
+def _map_with_multiprocessing_pool(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+):
+ num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
+ split_kwds = [] # We organize the splits ourselves (contiguous splits)
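+ # e.g. with len(iterable) == 10 and num_proc == 3 the contiguous slices are
+ # iterable[0:4], iterable[4:7] and iterable[7:10] (lengths 4, 3, 3)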
+ for index in range(num_proc):
+ div = len(iterable) // num_proc
+ mod = len(iterable) % num_proc
+ start = div * index + min(index, mod)
+ end = start + div + (1 if index < mod else 0)
+ split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc))
+
+ if len(iterable) != sum(len(i[1]) for i in split_kwds):
+ raise ValueError(
+ f"Error dividing inputs iterable among processes. "
+ f"Total number of objects {len(iterable)}, "
+ f"length: {sum(len(i[1]) for i in split_kwds)}"
+ )
+
+ logger.info(
+ f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
+ )
+ initargs, initializer = None, None
+ if not disable_tqdm:
+ initargs, initializer = (RLock(),), tqdm.set_lock
+ with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
+ mapped = pool.map(single_map_nested_func, split_kwds)
+ logger.info(f"Finished {num_proc} processes")
+ mapped = [obj for proc_res in mapped for obj in proc_res]
+ logger.info(f"Unpacked {len(mapped)} objects")
+
+ return mapped
+
+
+def _map_with_joblib(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
+):
+ # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
+ # and it requires monkey-patching joblib internal classes which is subject to change
+ import joblib
+
+ with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
+ return joblib.Parallel()(
+ joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None))
+ for obj in iterable
+ )
+
+
+@experimental
+@contextlib.contextmanager
+def parallel_backend(backend_name: str):
+ """
+ **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
+ implemented by joblib.
+
+ Args:
+ backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.
+
+ Example usage:
+ ```py
+ with parallel_backend('spark'):
+ dataset = load_dataset(..., num_proc=2)
+ ```
+ """
+ ParallelBackendConfig.backend_name = backend_name
+
+ if backend_name == "spark":
+ from joblibspark import register_spark
+
+ register_spark()
+
+ # TODO: call create_cache_and_write_probe if "download" in steps
+ # TODO: raise NotImplementedError when Dataset.map etc is called
+
+ try:
+ yield
+ finally:
+ ParallelBackendConfig.backend_name = None
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__init__.py b/venv/lib/python3.10/site-packages/datasets/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a18a1e79349cfb32a743aeca4c3e9a809645a75
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/__init__.py
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from ..utils.logging import get_logger
+from .audio_classification import AudioClassification
+from .automatic_speech_recognition import AutomaticSpeechRecognition
+from .base import TaskTemplate
+from .image_classification import ImageClassification
+from .language_modeling import LanguageModeling
+from .question_answering import QuestionAnsweringExtractive
+from .summarization import Summarization
+from .text_classification import TextClassification
+
+
+__all__ = [
+ "AutomaticSpeechRecognition",
+ "AudioClassification",
+ "ImageClassification",
+ "LanguageModeling",
+ "QuestionAnsweringExtractive",
+ "Summarization",
+ "TaskTemplate",
+ "TextClassification",
+]
+
+logger = get_logger(__name__)
+
+
+NAME2TEMPLATE = {
+ AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
+ AudioClassification.task: AudioClassification,
+ ImageClassification.task: ImageClassification,
+ LanguageModeling.task: LanguageModeling,
+ QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
+ Summarization.task: Summarization,
+ TextClassification.task: TextClassification,
+}
+
+
+def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
+ """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary."""
+    task_name = task_template_dict.get("task")
+    template = NAME2TEMPLATE.get(task_name)
+    if template is None:
+        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
+        return None
+    return template.from_dict(task_template_dict)
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90a18365203820e049f234d22b1fbc0d4a4579cc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d1e35a46c8ba1b5cb9aab1964500c6f3e0d420a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/audio_classification.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27b43ea9be31c1a63d8634d5c4c0d62dc357dda0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/automatic_speech_recognition.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4efe4207c98f9656c27a05b0e7a04c364aaece8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ea0535a83b7e5917f422159e01c47f835b34136
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/image_classification.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbaa8cb7d0532956dffb9de1372841179f6279e0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/language_modeling.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6193aed1ab093907fd1031db4a9f0a1ea63adde5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65aecf29d7d136af535496ffae1e351b84929d75
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/summarization.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6b499be25007a88950174215ffe19cad9ca49de
Binary files /dev/null and b/venv/lib/python3.10/site-packages/datasets/tasks/__pycache__/text_classification.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/audio_classification.py b/venv/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f9fe402f3814b4db0eb1832405adcfaef77503e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/audio_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, ClassLabel, Features
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AudioClassification(TaskTemplate):
+ task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ audio_column: str = "audio"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.audio_column: "audio",
+ self.label_column: "labels",
+ }
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py b/venv/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
new file mode 100644
index 0000000000000000000000000000000000000000..103a98a1bc9774de6b652bbc69b41501a419f0f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/automatic_speech_recognition.py
@@ -0,0 +1,30 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Audio, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class AutomaticSpeechRecognition(TaskTemplate):
+ task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"audio": Audio()})
+ label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
+ audio_column: str = "audio"
+ transcription_column: str = "transcription"
+
+ def align_with_features(self, features):
+ if self.audio_column not in features:
+ raise ValueError(f"Column {self.audio_column} is not present in features.")
+ if not isinstance(features[self.audio_column], Audio):
+ raise ValueError(f"Column {self.audio_column} is not an Audio type.")
+ task_template = copy.deepcopy(self)
+ input_schema = self.input_schema.copy()
+ input_schema["audio"] = features[self.audio_column]
+ task_template.__dict__["input_schema"] = input_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.audio_column: "audio", self.transcription_column: "transcription"}
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/base.py b/venv/lib/python3.10/site-packages/datasets/tasks/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a5337ffc0784a1ed12f4617a9a0ef6ba7253e5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/base.py
@@ -0,0 +1,39 @@
+import abc
+import copy
+import dataclasses
+from dataclasses import dataclass
+from typing import ClassVar, Dict, Type, TypeVar
+
+from ..features import Features
+
+
+T = TypeVar("T", bound="TaskTemplate")
+
+
+@dataclass(frozen=True)
+class TaskTemplate(abc.ABC):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str
+ input_schema: ClassVar[Features]
+ label_schema: ClassVar[Features]
+
+ def align_with_features(self: T, features: Features) -> T:
+ """
+ Align features with the task template.
+ """
+ # No-op
+ return copy.deepcopy(self)
+
+ @property
+ def features(self) -> Features:
+ return Features(**self.input_schema, **self.label_schema)
+
+ @property
+ @abc.abstractmethod
+ def column_mapping(self) -> Dict[str, str]:
+ raise NotImplementedError
+
+ @classmethod
+ def from_dict(cls: Type[T], template_dict: dict) -> T:
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in template_dict.items() if k in field_names})
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/image_classification.py b/venv/lib/python3.10/site-packages/datasets/tasks/image_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..20a19e0408a7ec8061ac4fac700d83e6dcbadcdf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/image_classification.py
@@ -0,0 +1,33 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Image
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class ImageClassification(TaskTemplate):
+ task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"image": Image()})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ image_column: str = "image"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.image_column: "image",
+ self.label_column: "labels",
+ }
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/language_modeling.py b/venv/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2837744fa1718e57ffbeeca1a6e9a60c9468d8f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class LanguageModeling(TaskTemplate):
+ task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
+
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({})
+ text_column: str = "text"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text"}
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/question_answering.py b/venv/lib/python3.10/site-packages/datasets/tasks/question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..349fd54141762631eec025681015cedd97c23b63
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/question_answering.py
@@ -0,0 +1,29 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Sequence, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class QuestionAnsweringExtractive(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
+ label_schema: ClassVar[Features] = Features(
+ {
+ "answers": Sequence(
+ {
+ "text": Value("string"),
+ "answer_start": Value("int32"),
+ }
+ )
+ }
+ )
+ question_column: str = "question"
+ context_column: str = "context"
+ answers_column: str = "answers"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/summarization.py b/venv/lib/python3.10/site-packages/datasets/tasks/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0057b07b4f62947c1bfde1962bf06be1427c363
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/summarization.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class Summarization(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"summary": Value("string")})
+ text_column: str = "text"
+ summary_column: str = "summary"
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {self.text_column: "text", self.summary_column: "summary"}
diff --git a/venv/lib/python3.10/site-packages/datasets/tasks/text_classification.py b/venv/lib/python3.10/site-packages/datasets/tasks/text_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..13584b73e8ae668bd6c145b60598cd6859be5146
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/tasks/text_classification.py
@@ -0,0 +1,34 @@
+import copy
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import ClassLabel, Features, Value
+from .base import TaskTemplate
+
+
+@dataclass(frozen=True)
+class TextClassification(TaskTemplate):
+ # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
+ task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
+ input_schema: ClassVar[Features] = Features({"text": Value("string")})
+ label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
+ text_column: str = "text"
+ label_column: str = "labels"
+
+ def align_with_features(self, features):
+ if self.label_column not in features:
+ raise ValueError(f"Column {self.label_column} is not present in features.")
+ if not isinstance(features[self.label_column], ClassLabel):
+ raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
+ task_template = copy.deepcopy(self)
+ label_schema = self.label_schema.copy()
+ label_schema["labels"] = features[self.label_column]
+ task_template.__dict__["label_schema"] = label_schema
+ return task_template
+
+ @property
+ def column_mapping(self) -> Dict[str, str]:
+ return {
+ self.text_column: "text",
+ self.label_column: "labels",
+ }
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/__init__.py b/venv/lib/python3.10/site-packages/datasets/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df74ff8cac8f1fd30a5dd786c9cd5c89d2880af
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ruff: noqa
+
+from . import tqdm as _tqdm # _tqdm is the module
+from .info_utils import VerificationMode
+from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
+from .version import Version
+from .experimental import experimental
+from .tqdm import (
+ disable_progress_bars,
+ enable_progress_bars,
+ are_progress_bars_disabled,
+ tqdm,
+)
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py b/venv/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py
new file mode 100644
index 0000000000000000000000000000000000000000..58edecb1cc14cec752afc34c1551c49c7490ca4c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/_dataset_viewer.py
@@ -0,0 +1,96 @@
+from typing import Any, Dict, List, Optional, Union
+
+from .. import config
+from ..exceptions import DatasetsError
+from .file_utils import (
+ get_authentication_headers_for_url,
+ http_get,
+)
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class DatasetViewerError(DatasetsError):
+ """Dataset viewer error.
+
+    Raised when trying to use the dataset viewer HTTP API to access:
+    - a missing dataset,
+    - a private/gated dataset without being authenticated, or
+    - unavailable /parquet or /info responses.
+ """
+
+
+def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
+ """
+    Get the exported Parquet files for the dataset.
+ Docs: https://huggingface.co/docs/datasets-server/parquet
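+
+    A minimal sketch (the dataset name is illustrative; `revision=None` accepts whatever revision was exported):
+
+    ```py
+    parquet_files = get_exported_parquet_files("username/my_dataset", revision=None, token=None)
+    # list of dicts, one per exported parquet shard, when the export is ready
+    ```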
+ """
+ dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
+ try:
+ parquet_data_files_response = http_get(
+ url=dataset_viewer_parquet_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ parquet_data_files_response.raise_for_status()
+ if "X-Revision" in parquet_data_files_response.headers:
+ if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
+ parquet_data_files_response_json = parquet_data_files_response.json()
+ if (
+ parquet_data_files_response_json.get("partial") is False
+ and not parquet_data_files_response_json.get("pending", True)
+ and not parquet_data_files_response_json.get("failed", True)
+ and "parquet_files" in parquet_data_files_response_json
+ ):
+ return parquet_data_files_response_json["parquet_files"]
+ else:
+ logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
+ )
+    except Exception as e:  # noqa catch any exception from the dataset viewer API and assume the parquet export doesn't exist
+ logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetViewerError("No exported Parquet files available.")
+
+
+def get_exported_dataset_infos(
+ dataset: str, revision: str, token: Optional[Union[str, bool]]
+) -> Dict[str, Dict[str, Any]]:
+ """
+    Get the dataset information; useful to get e.g. the dataset features.
+ Docs: https://huggingface.co/docs/datasets-server/info
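+
+    A minimal sketch (the dataset name is illustrative):
+
+    ```py
+    infos = get_exported_dataset_infos("username/my_dataset", revision=None, token=None)
+    # mapping from config name to the exported dataset info, when the export is ready
+    ```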
+ """
+ dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
+ try:
+ info_response = http_get(
+ url=dataset_viewer_info_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ info_response.raise_for_status()
+ if "X-Revision" in info_response.headers:
+ if info_response.headers["X-Revision"] == revision or revision is None:
+ info_response = info_response.json()
+ if (
+ info_response.get("partial") is False
+ and not info_response.get("pending", True)
+ and not info_response.get("failed", True)
+ and "dataset_info" in info_response
+ ):
+ return info_response["dataset_info"]
+ else:
+ logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
+ )
+    except Exception as e:  # noqa catch any exception from the dataset viewer API and assume the dataset info doesn't exist
+ logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetViewerError("No exported dataset infos available.")
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/_dill.py b/venv/lib/python3.10/site-packages/datasets/utils/_dill.py
new file mode 100644
index 0000000000000000000000000000000000000000..15578198a39622340f937a3dfdd9091af26d5453
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/_dill.py
@@ -0,0 +1,459 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Extends `dill` to support pickling more types and produce more consistent dumps."""
+
+import os
+import sys
+from io import BytesIO
+from types import CodeType, FunctionType
+
+import dill
+from packaging import version
+
+from .. import config
+
+
+class Pickler(dill.Pickler):
+ dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
+ _legacy_no_dict_keys_sorting = False
+
+ def save(self, obj, save_persistent_id=True):
+ obj_type = type(obj)
+ if obj_type not in self.dispatch:
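+            # Lazily register custom reducers for third-party types (regex, spacy, tiktoken, torch, transformers)
+            # only if their modules are already imported, so pickling never triggers an optional import.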
+ if "regex" in sys.modules:
+ import regex # type: ignore
+
+ if obj_type is regex.Pattern:
+ pklregister(obj_type)(_save_regexPattern)
+ if "spacy" in sys.modules:
+ import spacy # type: ignore
+
+ if issubclass(obj_type, spacy.Language):
+ pklregister(obj_type)(_save_spacyLanguage)
+ if "tiktoken" in sys.modules:
+ import tiktoken # type: ignore
+
+ if obj_type is tiktoken.Encoding:
+ pklregister(obj_type)(_save_tiktokenEncoding)
+ if "torch" in sys.modules:
+ import torch # type: ignore
+
+ if issubclass(obj_type, torch.Tensor):
+ pklregister(obj_type)(_save_torchTensor)
+
+ if obj_type is torch.Generator:
+ pklregister(obj_type)(_save_torchGenerator)
+
+ # Unwrap `torch.compile`-ed modules
+ if issubclass(obj_type, torch.nn.Module):
+ obj = getattr(obj, "_orig_mod", obj)
+ if "transformers" in sys.modules:
+ import transformers # type: ignore
+
+ if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
+ pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
+
+ # Unwrap `torch.compile`-ed functions
+ if obj_type is FunctionType:
+ obj = getattr(obj, "_torchdynamo_orig_callable", obj)
+ dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
+
+ def _batch_setitems(self, items):
+ if self._legacy_no_dict_keys_sorting:
+ return super()._batch_setitems(items)
+ # Ignore the order of keys in a dict
+ try:
+ # Faster, but fails for unorderable elements
+ items = sorted(items)
+ except Exception: # TypeError, decimal.InvalidOperation, etc.
+ from datasets.fingerprint import Hasher
+
+ items = sorted(items, key=lambda x: Hasher.hash(x[0]))
+ dill.Pickler._batch_setitems(self, items)
+
+ def memoize(self, obj):
+ # Don't memoize strings since two identical strings can have different Python ids
+ if type(obj) is not str: # noqa: E721
+ dill.Pickler.memoize(self, obj)
+
+
+def pklregister(t):
+ """Register a custom reducer for the type."""
+
+ def proxy(func):
+ Pickler.dispatch[t] = func
+ return func
+
+ return proxy
+
+
+def dump(obj, file):
+ """Pickle an object to a file."""
+ Pickler(file, recurse=True).dump(obj)
+
+
+def dumps(obj):
+    """Pickle an object to a bytes object."""
+ file = BytesIO()
+ dump(obj, file)
+ return file.getvalue()
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+ def log(pickler, msg):
+ dill._dill.log.info(msg)
+
+elif config.DILL_VERSION.release[:3] in [
+ version.parse("0.3.6").release,
+ version.parse("0.3.7").release,
+ version.parse("0.3.8").release,
+]:
+
+ def log(pickler, msg):
+ dill._dill.logger.trace(pickler, msg)
+
+
+@pklregister(set)
+def _save_set(pickler, obj):
+ log(pickler, f"Se: {obj}")
+ try:
+ # Faster, but fails for unorderable elements
+ args = (sorted(obj),)
+ except Exception: # TypeError, decimal.InvalidOperation, etc.
+ from datasets.fingerprint import Hasher
+
+ args = (sorted(obj, key=Hasher.hash),)
+
+ pickler.save_reduce(set, args, obj=obj)
+ log(pickler, "# Se")
+
+
+def _save_regexPattern(pickler, obj):
+ import regex # type: ignore
+
+ log(pickler, f"Re: {obj}")
+ args = (obj.pattern, obj.flags)
+ pickler.save_reduce(regex.compile, args, obj=obj)
+ log(pickler, "# Re")
+
+
+def _save_tiktokenEncoding(pickler, obj):
+ import tiktoken # type: ignore
+
+ log(pickler, f"Enc: {obj}")
+ args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
+ pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
+ log(pickler, "# Enc")
+
+
+def _save_torchTensor(pickler, obj):
+ import torch # type: ignore
+
+ # `torch.from_numpy` is not picklable in `torch>=1.11.0`
+ def create_torchTensor(np_array):
+ return torch.from_numpy(np_array)
+
+ log(pickler, f"To: {obj}")
+ args = (obj.detach().cpu().numpy(),)
+ pickler.save_reduce(create_torchTensor, args, obj=obj)
+ log(pickler, "# To")
+
+
+def _save_torchGenerator(pickler, obj):
+ import torch # type: ignore
+
+ def create_torchGenerator(state):
+ generator = torch.Generator()
+ generator.set_state(state)
+ return generator
+
+ log(pickler, f"Ge: {obj}")
+ args = (obj.get_state(),)
+ pickler.save_reduce(create_torchGenerator, args, obj=obj)
+ log(pickler, "# Ge")
+
+
+def _save_spacyLanguage(pickler, obj):
+ import spacy # type: ignore
+
+ def create_spacyLanguage(config, bytes):
+ lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
+ lang_inst = lang_cls.from_config(config)
+ return lang_inst.from_bytes(bytes)
+
+ log(pickler, f"Sp: {obj}")
+ args = (obj.config, obj.to_bytes())
+ pickler.save_reduce(create_spacyLanguage, args, obj=obj)
+ log(pickler, "# Sp")
+
+
+def _save_transformersPreTrainedTokenizerBase(pickler, obj):
+ log(pickler, f"Tok: {obj}")
+ # Ignore the `cache` attribute
+ state = obj.__dict__
+ if "cache" in state and isinstance(state["cache"], dict):
+ state["cache"] = {}
+ pickler.save_reduce(type(obj), (), state=state, obj=obj)
+ log(pickler, "# Tok")
+
+
+if config.DILL_VERSION < version.parse("0.3.6"):
+
+ @pklregister(CodeType)
+ def _save_code(pickler, obj):
+ """
+ From dill._dill.save_code
+ This is a modified version that removes the origin (filename + line no.)
+ of functions created in notebooks or shells for example.
+ """
+ dill._dill.log.info(f"Co: {obj}")
+ # The filename of a function is the .py file where it is defined.
+ # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-...> for ipython, and <stdin> for shell
+ # Filenames of functions created in ipykernel the filename
+ # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+ #
+ # For the hashing mechanism we ignore where the function has been defined
+ # More specifically:
+ # - we ignore the filename of special functions (filename starts with '<')
+ # - we always ignore the line number
+ # - we only use the base name of the file instead of the whole path,
+ # to be robust in case a script is moved for example.
+ #
+ # Only those two lines are different from the original implementation:
+ co_filename = (
+ ""
+ if obj.co_filename.startswith("<")
+ or (
+ len(obj.co_filename.split(os.path.sep)) > 1
+ and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+ )
+            or obj.co_name == "<lambda>"
+ else os.path.basename(obj.co_filename)
+ )
+ co_firstlineno = 1
+ # The rest is the same as in the original dill implementation
+ if dill._dill.PY3:
+ if hasattr(obj, "co_posonlyargcount"):
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else:
+ args = (
+ obj.co_argcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else:
+ args = (
+ obj.co_argcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename,
+ obj.co_name,
+ co_firstlineno,
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ pickler.save_reduce(CodeType, args, obj=obj)
+ dill._dill.log.info("# Co")
+ return
+
+elif config.DILL_VERSION.release[:3] in [
+ version.parse("0.3.6").release,
+ version.parse("0.3.7").release,
+ version.parse("0.3.8").release,
+]:
+ # From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
+ @pklregister(CodeType)
+ def save_code(pickler, obj):
+ dill._dill.logger.trace(pickler, "Co: %s", obj)
+
+ ############################################################################################################
+ # Modification here for huggingface/datasets
+ # The filename of a function is the .py file where it is defined.
+ # Filenames of functions created in notebooks or shells start with '<'
+        # ex: <ipython-input-...> for ipython, and <stdin> for shell
+ # Filenames of functions created in ipykernel the filename
+ # look like f"{tempdir}/ipykernel_{id1}/{id2}.py"
+        # Moreover lambda functions have a special name: '<lambda>'
+        # ex: (lambda x: x).__code__.co_name == "<lambda>"  # True
+ #
+ # For the hashing mechanism we ignore where the function has been defined
+ # More specifically:
+ # - we ignore the filename of special functions (filename starts with '<')
+ # - we always ignore the line number
+ # - we only use the base name of the file instead of the whole path,
+ # to be robust in case a script is moved for example.
+ #
+ # Only those two lines are different from the original implementation:
+ co_filename = (
+ ""
+ if obj.co_filename.startswith("<")
+ or (
+ len(obj.co_filename.split(os.path.sep)) > 1
+ and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
+ )
+            or obj.co_name == "<lambda>"
+ else os.path.basename(obj.co_filename)
+ )
+ co_firstlineno = 1
+ # The rest is the same as in the original dill implementation, except for the replacements:
+ # - obj.co_filename => co_filename
+ # - obj.co_firstlineno => co_firstlineno
+ ############################################################################################################
+
+ if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ obj.co_qualname,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_endlinetable,
+ obj.co_columntable,
+ obj.co_exceptiontable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ obj.co_qualname,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_exceptiontable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
+ args = (
+ obj.co_lnotab, # for < python 3.10 [not counted in args]
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_linetable,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
+ args = (
+ obj.co_argcount,
+ obj.co_posonlyargcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+ else: # python 3.7 (15 args)
+ args = (
+ obj.co_argcount,
+ obj.co_kwonlyargcount,
+ obj.co_nlocals,
+ obj.co_stacksize,
+ obj.co_flags,
+ obj.co_code,
+ obj.co_consts,
+ obj.co_names,
+ obj.co_varnames,
+ co_filename, # Modification for huggingface/datasets ############################################
+ obj.co_name,
+ co_firstlineno, # Modification for huggingface/datasets #########################################
+ obj.co_lnotab,
+ obj.co_freevars,
+ obj.co_cellvars,
+ )
+
+ pickler.save_reduce(dill._dill._create_code, args, obj=obj)
+ dill._dill.logger.trace(pickler, "# Co")
+ return
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/_filelock.py b/venv/lib/python3.10/site-packages/datasets/utils/_filelock.py
new file mode 100644
index 0000000000000000000000000000000000000000..19620e6e777505eaf314366f7f3c657fafc515e0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/_filelock.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""Utilities to handle file locking in `datasets`."""
+
+import os
+
+from filelock import FileLock as FileLock_
+from filelock import UnixFileLock
+from filelock import __version__ as _filelock_version
+from packaging import version
+
+
+class FileLock(FileLock_):
+ """
+    A `filelock.FileLock` subclass that handles long paths.
+ It also uses the current umask for lock files.
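+
+    Example usage (the lock path is illustrative):
+
+    ```py
+    with FileLock("/tmp/datasets_example.lock"):
+        ...  # critical section protected against concurrent processes
+    ```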
+ """
+
+ MAX_FILENAME_LENGTH = 255
+
+ def __init__(self, lock_file, *args, **kwargs):
+ # The "mode" argument is required if we want to use the current umask in filelock >= 3.10
+        # In previous versions, it already used the current umask.
+ if "mode" not in kwargs and version.parse(_filelock_version) >= version.parse("3.10.0"):
+ umask = os.umask(0o666)
+ os.umask(umask)
+ kwargs["mode"] = 0o666 & ~umask
+ lock_file = self.hash_filename_if_too_long(lock_file)
+ super().__init__(lock_file, *args, **kwargs)
+
+ @classmethod
+ def hash_filename_if_too_long(cls, path: str) -> str:
+ path = os.path.abspath(os.path.expanduser(path))
+ filename = os.path.basename(path)
+ max_filename_length = cls.MAX_FILENAME_LENGTH
+ if issubclass(cls, UnixFileLock):
+ max_filename_length = min(max_filename_length, os.statvfs(os.path.dirname(path)).f_namemax)
+ if len(filename) > max_filename_length:
+ dirname = os.path.dirname(path)
+ hashed_filename = str(hash(filename))
+ new_filename = (
+ filename[: max_filename_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
+ )
+ return os.path.join(dirname, new_filename)
+ else:
+ return path
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/beam_utils.py b/venv/lib/python3.10/site-packages/datasets/utils/beam_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..831354397cf2bb1c0ee464093484d53c037aa95c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/beam_utils.py
@@ -0,0 +1,52 @@
+import os
+
+from apache_beam.io.filesystems import FileSystems
+from apache_beam.pipeline import Pipeline
+
+from .logging import get_logger
+
+
+CHUNK_SIZE = 2 << 20 # 2mb
+logger = get_logger(__name__)
+
+
+class BeamPipeline(Pipeline):
+ """Wrapper over `apache_beam.pipeline.Pipeline` for convenience"""
+
+ def is_local(self):
+ runner = self._options.get_all_options().get("runner")
+ return runner in [None, "DirectRunner", "PortableRunner"]
+
+
+def upload_local_to_remote(local_file_path, remote_file_path, force_upload=False):
+ """Use the Beam Filesystems to upload to a remote directory on gcs/s3/hdfs..."""
+ fs = FileSystems
+ if fs.exists(remote_file_path):
+ if force_upload:
+            logger.info(f"Remote path already exists: {remote_file_path}. Overwriting it as force_upload=True.")
+        else:
+            logger.info(f"Remote path already exists: {remote_file_path}. Skipping it as force_upload=False.")
+ return
+ with fs.create(remote_file_path) as remote_file:
+ with open(local_file_path, "rb") as local_file:
+ chunk = local_file.read(CHUNK_SIZE)
+ while chunk:
+ remote_file.write(chunk)
+ chunk = local_file.read(CHUNK_SIZE)
+
+
+def download_remote_to_local(remote_file_path, local_file_path, force_download=False):
+ """Use the Beam Filesystems to download from a remote directory on gcs/s3/hdfs..."""
+ fs = FileSystems
+ if os.path.exists(local_file_path):
+ if force_download:
+            logger.info(f"Local path already exists: {local_file_path}. Overwriting it as force_download=True.")
+        else:
+            logger.info(f"Local path already exists: {local_file_path}. Skipping it as force_download=False.")
+ return
+ with fs.open(remote_file_path) as remote_file:
+ with open(local_file_path, "wb") as local_file:
+ chunk = remote_file.read(CHUNK_SIZE)
+ while chunk:
+ local_file.write(chunk)
+ chunk = remote_file.read(CHUNK_SIZE)
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/cache.py b/venv/lib/python3.10/site-packages/datasets/utils/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..5485441439d17051eb9a59bc7bc4155321923958
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/cache.py
@@ -0,0 +1,283 @@
+import json
+import os
+import re
+import shutil
+import tempfile
+from contextlib import contextmanager
+from functools import partial
+from pathlib import Path
+from urllib.parse import urljoin, urlparse
+
+import requests
+
+from datasets import DownloadConfig, config
+from datasets.utils.extract import ExtractManager
+from datasets.utils.file_utils import (
+ _raise_if_offline_mode_is_enabled,
+ ftp_get,
+ ftp_head,
+ get_authentication_headers_for_url,
+ hash_url_to_filename,
+ http_get,
+ http_head,
+ is_local_path,
+ is_remote_url,
+ logger,
+)
+from datasets.utils.filelock import FileLock
+
+
+def cached_path(
+ url_or_filename,
+ download_config=None,
+ **download_kwargs,
+) -> str:
+ """
+ Given something that might be a URL (or might be a local path),
+ determine which. If it's a URL, download the file and cache it, and
+ return the path to the cached file. If it's already a local path,
+ make sure the file exists and then return the path.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ ValueError: if it couldn't parse the url or filename correctly
+ requests.exceptions.ConnectionError: in case of internet connection issue
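+
+    Example (a minimal sketch; the URL is illustrative):
+
+    ```py
+    local_path = cached_path("https://example.com/data/train.csv")
+    # downloads the file on the first call and returns the cached copy on subsequent calls
+    ```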
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+
+ cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+ if isinstance(url_or_filename, Path):
+ url_or_filename = str(url_or_filename)
+
+ if is_remote_url(url_or_filename):
+ # URL, so get it from the cache (downloading if necessary)
+ output_path = get_from_cache(
+ url_or_filename,
+ cache_dir=cache_dir,
+ force_download=download_config.force_download,
+ proxies=download_config.proxies,
+ resume_download=download_config.resume_download,
+ user_agent=download_config.user_agent,
+ local_files_only=download_config.local_files_only,
+ use_etag=download_config.use_etag,
+ max_retries=download_config.max_retries,
+ use_auth_token=download_config.use_auth_token,
+ ignore_url_params=download_config.ignore_url_params,
+ download_desc=download_config.download_desc,
+ )
+ elif os.path.exists(url_or_filename):
+ # File, and it exists.
+ output_path = url_or_filename
+ elif is_local_path(url_or_filename):
+ # File, but it doesn't exist.
+ raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
+ else:
+ # Something unknown
+ raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")
+
+ if output_path is None:
+ return output_path
+
+ if download_config.extract_compressed_file:
+ output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
+ output_path, force_extract=download_config.force_extract
+ )
+
+ return output_path
+
+
+def get_from_cache(
+ url,
+ cache_dir=None,
+ force_download=False,
+ proxies=None,
+ etag_timeout=100,
+ resume_download=False,
+ user_agent=None,
+ local_files_only=False,
+ use_etag=True,
+ max_retries=0,
+ use_auth_token=None,
+ ignore_url_params=False,
+ download_desc=None,
+) -> str:
+ """
+ Given a URL, look for the corresponding file in the local cache.
+ If it's not there, download it. Then return the path to the cached file.
+
+ Return:
+ Local path (string)
+
+ Raises:
+ FileNotFoundError: in case of non-recoverable file
+ (non-existent or no cache on disk)
+ ConnectionError: in case of unreachable url
+ and no cache on disk
+ """
+ if cache_dir is None:
+ cache_dir = config.HF_DATASETS_CACHE
+ if isinstance(cache_dir, Path):
+ cache_dir = str(cache_dir)
+
+ os.makedirs(cache_dir, exist_ok=True)
+
+ if ignore_url_params:
+ # strip all query parameters and #fragments from the URL
+ cached_url = urljoin(url, urlparse(url).path)
+ else:
+ cached_url = url # additional parameters may be added to the given URL
+
+ connected = False
+ response = None
+ cookies = None
+ etag = None
+ head_error = None
+
+    # Try a first time to find the file on the local file system without eTag (None)
+ # if we don't ask for 'force_download' then we spare a request
+ filename = hash_url_to_filename(cached_url, etag=None)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download and not use_etag:
+ return cache_path
+
+ # Prepare headers for authentication
+ headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
+ if user_agent is not None:
+ headers["user-agent"] = user_agent
+
+ # We don't have the file locally or we need an eTag
+ if not local_files_only:
+ if url.startswith("ftp://"):
+ connected = ftp_head(url)
+ try:
+ response = http_head(
+ url,
+ allow_redirects=True,
+ proxies=proxies,
+ timeout=etag_timeout,
+ max_retries=max_retries,
+ headers=headers,
+ )
+ if response.status_code == 200: # ok
+ etag = response.headers.get("ETag") if use_etag else None
+ for k, v in response.cookies.items():
+ # In some edge cases, we need to get a confirmation token
+ if k.startswith("download_warning") and "drive.google.com" in url:
+ url += "&confirm=" + v
+ cookies = response.cookies
+ connected = True
+ # Fix Google Drive URL to avoid Virus scan warning
+ if "drive.google.com" in url and "confirm=" not in url:
+ url += "&confirm=t"
+ # In some edge cases, head request returns 400 but the connection is actually ok
+ elif (
+ (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
+ or (response.status_code == 405 and "drive.google.com" in url)
+ or (
+ response.status_code == 403
+ and (
+ re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
+ or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
+ )
+ )
+ or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
+ ):
+ connected = True
+ logger.info(f"Couldn't get ETag version for url {url}")
+ elif response.status_code == 401 and config.HF_ENDPOINT in url and use_auth_token is None:
+ raise ConnectionError(
+ f"Unauthorized for URL {url}. Please use the parameter `use_auth_token=True` after logging in with `huggingface-cli login`"
+ )
+ except (OSError, requests.exceptions.Timeout) as e:
+ # not connected
+ head_error = e
+ pass
+
+    # connected is False: we don't have a connection, or the url doesn't exist, or it is otherwise inaccessible;
+    # try to get the last downloaded version
+ if not connected:
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+ if local_files_only:
+ raise FileNotFoundError(
+ f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
+                " disabled. To enable online file look-ups, set 'local_files_only' to False."
+ )
+ elif response is not None and response.status_code == 404:
+ raise FileNotFoundError(f"Couldn't find file at {url}")
+ _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
+ if head_error is not None:
+ raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
+ elif response is not None:
+ raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
+ else:
+ raise ConnectionError(f"Couldn't reach {url}")
+
+ # Try a second time
+ filename = hash_url_to_filename(cached_url, etag)
+ cache_path = os.path.join(cache_dir, filename)
+
+ if os.path.exists(cache_path) and not force_download:
+ return cache_path
+
+ # From now on, connected is True.
+ # Prevent parallel downloads of the same file with a lock.
+ lock_path = cache_path + ".lock"
+ with FileLock(lock_path):
+ if resume_download:
+ incomplete_path = cache_path + ".incomplete"
+
+ @contextmanager
+ def _resumable_file_manager():
+ with open(incomplete_path, "a+b") as f:
+ yield f
+
+ temp_file_manager = _resumable_file_manager
+ if os.path.exists(incomplete_path):
+ resume_size = os.stat(incomplete_path).st_size
+ else:
+ resume_size = 0
+ else:
+ temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
+ resume_size = 0
+
+ # Download to temporary file, then copy to cache dir once finished.
+ # Otherwise you get corrupt cache entries if the download gets interrupted.
+ with temp_file_manager() as temp_file:
+ logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")
+
+ # GET file object
+ if url.startswith("ftp://"):
+ ftp_get(url, temp_file)
+ else:
+ http_get(
+ url,
+ temp_file,
+ proxies=proxies,
+ resume_size=resume_size,
+ headers=headers,
+ cookies=cookies,
+ max_retries=max_retries,
+ desc=download_desc,
+ )
+
+ logger.info(f"storing {url} in cache at {cache_path}")
+ shutil.move(temp_file.name, cache_path)
+
+ logger.info(f"creating metadata file for {cache_path}")
+ meta = {"url": url, "etag": etag}
+ meta_path = cache_path + ".json"
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
+ json.dump(meta, meta_file)
+
+ return cache_path
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py b/venv/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f05ecbeaa3eae5476e99c461dbede9ebfa111eb0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/deprecation_utils.py
@@ -0,0 +1,105 @@
+import enum
+import inspect
+import warnings
+from functools import wraps
+from typing import Callable, Optional
+
+from .logging import get_logger
+
+
+_emitted_deprecation_warnings = set()
+logger = get_logger(__name__)
+
+
+def deprecated(help_message: Optional[str] = None):
+ """Decorator to mark a class or a function as deprecated.
+
+ Args:
+ help_message (:obj:`str`, optional): An optional message to guide the user on how to
+ switch to non-deprecated usage of the library.
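+
+    Example (the decorated function and the message are illustrative):
+
+    ```py
+    @deprecated("Use `new_function` instead.")
+    def old_function(x):
+        return x * 2
+
+    old_function(1)  # the first call emits a FutureWarning; later calls don't warn again
+    ```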
+ """
+
+ def decorator(deprecated_class_or_function: Callable):
+ global _emitted_deprecation_warnings
+
+ if inspect.isclass(deprecated_class_or_function):
+ deprecated_function = deprecated_class_or_function.__init__
+ name = deprecated_class_or_function.__name__
+ else:
+ deprecated_function = deprecated_class_or_function
+ name = deprecated_function.__name__
+ # Support deprecating __init__ class method: class name instead
+ name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2]
+
+        warning_msg = (
+            f"{name} is deprecated and will be removed in the next major version of datasets."
+            + (f" {help_message}" if help_message else "")
+        )
+
+ @wraps(deprecated_function)
+ def wrapper(*args, **kwargs):
+ func_hash = hash(deprecated_function)
+ if func_hash not in _emitted_deprecation_warnings:
+ warnings.warn(warning_msg, category=FutureWarning, stacklevel=2)
+ _emitted_deprecation_warnings.add(func_hash)
+ return deprecated_function(*args, **kwargs)
+
+ wrapper._decorator_name_ = "deprecated"
+
+ if inspect.isclass(deprecated_class_or_function):
+ deprecated_class_or_function.__init__ = wrapper
+ return deprecated_class_or_function
+ else:
+ return wrapper
+
+ return decorator
+
+
+class OnAccess(enum.EnumMeta):
+ """
+ Enum metaclass that calls a user-specified function whenever a member is accessed.
+ """
+
+ def __getattribute__(cls, name):
+ obj = super().__getattribute__(name)
+ if isinstance(obj, enum.Enum) and obj._on_access:
+ obj._on_access()
+ return obj
+
+ def __getitem__(cls, name):
+ member = super().__getitem__(name)
+ if member._on_access:
+ member._on_access()
+ return member
+
+ def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
+ obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start)
+ if isinstance(obj, enum.Enum) and obj._on_access:
+ obj._on_access()
+ return obj
+
+
+class DeprecatedEnum(enum.Enum, metaclass=OnAccess):
+ """
+ Enum class that calls `deprecate` method whenever a member is accessed.
+ """
+
+ def __new__(cls, value):
+ member = object.__new__(cls)
+ member._value_ = value
+ member._on_access = member.deprecate
+ return member
+
+ @property
+ def help_message(self):
+ return ""
+
+ def deprecate(self):
+ help_message = f" {self.help_message}" if self.help_message else ""
+ warnings.warn(
+ f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets."
+ + help_message,
+ FutureWarning,
+ stacklevel=3,
+ )
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/doc_utils.py b/venv/lib/python3.10/site-packages/datasets/utils/doc_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ef8bcb4e70725ad086cb817e0ec4551d1c0966e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/doc_utils.py
@@ -0,0 +1,15 @@
+from typing import Callable
+
+
+def is_documented_by(function_with_docstring: Callable):
+ """Decorator to share docstrings across common functions.
+
+ Args:
+        function_with_docstring (`Callable`): The function whose docstring should be shared.
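+
+    Example (hypothetical functions, for illustration only):
+
+    ```py
+    def documented_function():
+        "Detailed docstring shared by several helpers."
+
+    @is_documented_by(documented_function)
+    def alias_function():
+        ...
+
+    # alias_function.__doc__ is now the same as documented_function.__doc__
+    ```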
+ """
+
+ def wrapper(target_function):
+ target_function.__doc__ = function_with_docstring.__doc__
+ return target_function
+
+ return wrapper
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/logging.py b/venv/lib/python3.10/site-packages/datasets/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..dffd5ce46e0d2da5cbbfb023003c3f4caae86093
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/logging.py
@@ -0,0 +1,179 @@
+# Copyright 2020 Optuna, Hugging Face
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Logging utilities."""
+
+import logging
+import os
+from logging import (
+ CRITICAL, # NOQA
+ DEBUG, # NOQA
+ ERROR, # NOQA
+ FATAL, # NOQA
+ INFO, # NOQA
+ NOTSET, # NOQA
+ WARN, # NOQA
+ WARNING, # NOQA
+)
+from typing import Optional
+
+from .tqdm import ( # noqa: F401 # imported for backward compatibility
+ disable_progress_bar,
+ enable_progress_bar,
+ is_progress_bar_enabled,
+ tqdm,
+)
+
+
+log_levels = {
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
+}
+
+_default_log_level = logging.WARNING
+
+
+def _get_default_logging_level():
+ """
+    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
+    If it is not, fall back to ``_default_log_level``.
+ """
+ env_level_str = os.getenv("DATASETS_VERBOSITY", None)
+ if env_level_str:
+ if env_level_str in log_levels:
+ return log_levels[env_level_str]
+ else:
+ logging.getLogger().warning(
+ f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
+ f"has to be one of: { ', '.join(log_levels.keys()) }"
+ )
+ return _default_log_level
+
+
+def _get_library_name() -> str:
+ return __name__.split(".")[0]
+
+
+def _get_library_root_logger() -> logging.Logger:
+ return logging.getLogger(_get_library_name())
+
+
+def _configure_library_root_logger() -> None:
+ # Apply our default configuration to the library root logger.
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.addHandler(logging.StreamHandler())
+ library_root_logger.setLevel(_get_default_logging_level())
+
+
+def _reset_library_root_logger() -> None:
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.setLevel(logging.NOTSET)
+
+
+def get_logger(name: Optional[str] = None) -> logging.Logger:
+ """Return a logger with the specified name.
+ This function can be used in dataset scripts.
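+
+    Example (the log message is illustrative):
+
+    ```py
+    logger = get_logger(__name__)
+    logger.info("Loading the dataset")
+    ```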
+ """
+ if name is None:
+ name = _get_library_name()
+ return logging.getLogger(name)
+
+
+def get_verbosity() -> int:
+    """Return the current level for the HuggingFace datasets library's root logger.
+
+    Returns:
+        Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
+
+    The HuggingFace datasets library has the following logging levels:
+
+    - `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
+    - `datasets.logging.ERROR`
+    - `datasets.logging.WARNING`, `datasets.logging.WARN`
+    - `datasets.logging.INFO`
+    - `datasets.logging.DEBUG`
+    """
+ return _get_library_root_logger().getEffectiveLevel()
+
+
+def set_verbosity(verbosity: int) -> None:
+ """Set the level for the Hugging Face Datasets library's root logger.
+ Args:
+ verbosity:
+ Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
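+
+    Example (equivalent to calling `set_verbosity_info()`):
+
+    ```py
+    import datasets
+
+    datasets.logging.set_verbosity(datasets.logging.INFO)
+    ```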
+ """
+ _get_library_root_logger().setLevel(verbosity)
+
+
+def set_verbosity_info():
+ """Set the level for the Hugging Face datasets library's root logger to `INFO`.
+
+ This will display most of the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
+ """
+ return set_verbosity(INFO)
+
+
+def set_verbosity_warning():
+ """Set the level for the Hugging Face datasets library's root logger to `WARNING`.
+
+ This will display only warning and error logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
+ """
+ return set_verbosity(WARNING)
+
+
+def set_verbosity_debug():
+ """Set the level for the Hugging Face datasets library's root logger to `DEBUG`.
+
+ This will display all the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
+ """
+ return set_verbosity(DEBUG)
+
+
+def set_verbosity_error():
+ """Set the level for the Hugging Face datasets library's root logger to `ERROR`.
+
+ This will display only error logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
+ """
+ return set_verbosity(ERROR)
+
+
+def disable_propagation() -> None:
+ """Disable propagation of the library log outputs.
+ Note that log propagation is disabled by default.
+ """
+ _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+ """Enable propagation of the library log outputs.
+ Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has
+ been configured.
+ """
+ _get_library_root_logger().propagate = True
+
+
+# Configure the library root logger at the module level (singleton-like)
+_configure_library_root_logger()
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/py_utils.py b/venv/lib/python3.10/site-packages/datasets/utils/py_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1304a97166774f70cbb95e2ebfb046cb92206b13
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/py_utils.py
@@ -0,0 +1,731 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Some python utils function and classes."""
+
+import copy
+import functools
+import itertools
+import multiprocessing.pool
+import os
+import queue
+import re
+import types
+import warnings
+from contextlib import contextmanager
+from dataclasses import fields, is_dataclass
+from multiprocessing import Manager
+from pathlib import Path
+from queue import Empty
+from shutil import disk_usage
+from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union
+from urllib.parse import urlparse
+
+import multiprocess
+import multiprocess.pool
+import numpy as np
+from tqdm.auto import tqdm
+
+from .. import config
+from ..parallel import parallel_map
+from . import logging
+from . import tqdm as hf_tqdm
+from ._dill import ( # noqa: F401 # imported for backward compatibility. TODO: remove in 3.0.0
+ Pickler,
+ dump,
+ dumps,
+ pklregister,
+)
+from ._filelock import FileLock
+
+
+try: # pragma: no branch
+ import typing_extensions as _typing_extensions
+ from typing_extensions import Final, Literal
+except ImportError:
+ _typing_extensions = Literal = Final = None
+
+
+logger = logging.get_logger(__name__)
+
+
+# NOTE: When used on an instance method, the cache is shared across all
+# instances and IS NOT per-instance.
+# See
+# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance
+# For @property methods, use @memoized_property below.
+memoize = functools.lru_cache
+
+
+def size_str(size_in_bytes):
+ """Returns a human readable size string.
+
+ If size_in_bytes is None, then returns "Unknown size".
+
+ For example `size_str(1.5 * datasets.units.GiB) == "1.50 GiB"`.
+
+ Args:
+ size_in_bytes: `int` or `None`, the size, in bytes, that we want to
+ format as a human-readable size string.
+ """
+ if not size_in_bytes:
+ return "Unknown size"
+
+ _NAME_LIST = [("PiB", 2**50), ("TiB", 2**40), ("GiB", 2**30), ("MiB", 2**20), ("KiB", 2**10)]
+
+ size_in_bytes = float(size_in_bytes)
+ for name, size_bytes in _NAME_LIST:
+ value = size_in_bytes / size_bytes
+ if value >= 1.0:
+ return f"{value:.2f} {name}"
+ return f"{int(size_in_bytes)} bytes"
+
+
+def convert_file_size_to_int(size: Union[int, str]) -> int:
+ """
+ Converts a size expressed as a string with digits and a unit (like `"50MB"`) to an integer (in bytes).
+
+ Args:
+ size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
+
+ Example:
+
+ ```py
+ >>> convert_file_size_to_int("1MiB")
+ 1048576
+ ```
+ """
+ if isinstance(size, int):
+ return size
+ if size.upper().endswith("PIB"):
+ return int(size[:-3]) * (2**50)
+ if size.upper().endswith("TIB"):
+ return int(size[:-3]) * (2**40)
+ if size.upper().endswith("GIB"):
+ return int(size[:-3]) * (2**30)
+ if size.upper().endswith("MIB"):
+ return int(size[:-3]) * (2**20)
+ if size.upper().endswith("KIB"):
+ return int(size[:-3]) * (2**10)
+ if size.upper().endswith("PB"):
+ int_size = int(size[:-2]) * (10**15)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("TB"):
+ int_size = int(size[:-2]) * (10**12)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("GB"):
+ int_size = int(size[:-2]) * (10**9)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("MB"):
+ int_size = int(size[:-2]) * (10**6)
+ return int_size // 8 if size.endswith("b") else int_size
+ if size.upper().endswith("KB"):
+ int_size = int(size[:-2]) * (10**3)
+ return int_size // 8 if size.endswith("b") else int_size
+ raise ValueError(f"`size={size}` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")
+
+
+def glob_pattern_to_regex(pattern):
+ # partially taken from fsspec:
+ # https://github.com/fsspec/filesystem_spec/blob/697d0f8133d8a5fbc3926e4761d7ecd51337ce50/fsspec/asyn.py#L735
+ return (
+ pattern.replace("\\", r"\\")
+ .replace(".", r"\.")
+ .replace("*", ".*")
+ .replace("+", r"\+")
+ .replace("//", "/")
+ .replace("(", r"\(")
+ .replace(")", r"\)")
+ .replace("|", r"\|")
+ .replace("^", r"\^")
+ .replace("$", r"\$")
+ .rstrip("/")
+ .replace("?", ".")
+ )
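+
+
+# Illustrative example (added comment, not part of the original module): "*" becomes ".*",
+# "?" becomes "." and regex metacharacters such as "." are escaped, so
+# glob_pattern_to_regex("data/*.json") returns the regex string r"data/.*\.json".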
+
+
+def string_to_dict(string: str, pattern: str) -> Dict[str, str]:
+ """Un-format a string using a python f-string pattern.
+ From https://stackoverflow.com/a/36838374
+
+ Example::
+
+ >>> p = 'hello, my name is {name} and I am a {age} year old {what}'
+ >>> s = p.format(name='cody', age=18, what='quarterback')
+ >>> s
+ 'hello, my name is cody and I am a 18 year old quarterback'
+ >>> string_to_dict(s, p)
+ {'age': '18', 'name': 'cody', 'what': 'quarterback'}
+
+ Args:
+ string (str): input string
+ pattern (str): pattern formatted like a python f-string
+
+ Returns:
+ Dict[str, str]: dictionary of variable -> value, retrieved from the input using the pattern
+
+ Raises:
+ ValueError: if the string doesn't match the pattern
+ """
+ regex = re.sub(r"{(.+?)}", r"(?P<_\1>.+)", pattern)
+ result = re.search(regex, string)
+ if result is None:
+ raise ValueError(f"String {string} doesn't match the pattern {pattern}")
+ values = list(result.groups())
+ keys = re.findall(r"{(.+?)}", pattern)
+ _dict = dict(zip(keys, values))
+ return _dict
+
+
+def asdict(obj):
+ """Convert an object to its dictionary representation recursively.
+
+
+ """
+
+ # Implementation based on https://docs.python.org/3/library/dataclasses.html#dataclasses.asdict
+
+ def _is_dataclass_instance(obj):
+ # https://docs.python.org/3/library/dataclasses.html#dataclasses.is_dataclass
+ return is_dataclass(obj) and not isinstance(obj, type)
+
+ def _asdict_inner(obj):
+ if _is_dataclass_instance(obj):
+ result = {}
+ for f in fields(obj):
+ value = _asdict_inner(getattr(obj, f.name))
+ if not f.init or value != f.default or f.metadata.get("include_in_asdict_even_if_is_default", False):
+ result[f.name] = value
+ return result
+ elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
+ # obj is a namedtuple
+ return type(obj)(*[_asdict_inner(v) for v in obj])
+ elif isinstance(obj, (list, tuple)):
+ # Assume we can create an object of this type by passing in a
+ # generator (which is not true for namedtuples, handled
+ # above).
+ return type(obj)(_asdict_inner(v) for v in obj)
+ elif isinstance(obj, dict):
+ return {_asdict_inner(k): _asdict_inner(v) for k, v in obj.items()}
+ else:
+ return copy.deepcopy(obj)
+
+ if not isinstance(obj, dict) and not _is_dataclass_instance(obj):
+ raise TypeError(f"{obj} is not a dict or a dataclass")
+
+ return _asdict_inner(obj)
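+
+
+# Illustrative example with a hypothetical dataclass (added comment, not part of the original module).
+# Note that init fields still equal to their default value are omitted from the output:
+#
+#   >>> from dataclasses import dataclass
+#   >>> @dataclass
+#   ... class ExampleInfo:
+#   ...     name: str = ""
+#   ...     num_examples: int = 0
+#   >>> asdict(ExampleInfo(name="train"))
+#   {'name': 'train'}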
+
+
+@contextmanager
+def temporary_assignment(obj, attr, value):
+ """Temporarily assign obj.attr to value."""
+ original = getattr(obj, attr, None)
+ setattr(obj, attr, value)
+ try:
+ yield
+ finally:
+ setattr(obj, attr, original)
+
+
+@contextmanager
+def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
+ """Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
+ np_state = np.random.get_state()
+ np.random.seed(seed)
+
+ if set_pytorch and config.TORCH_AVAILABLE:
+ import torch
+
+ torch_state = torch.random.get_rng_state()
+ torch.random.manual_seed(seed)
+
+ if torch.cuda.is_available():
+ torch_cuda_states = torch.cuda.get_rng_state_all()
+ torch.cuda.manual_seed_all(seed)
+
+ if set_tensorflow and config.TF_AVAILABLE:
+ import tensorflow as tf
+ from tensorflow.python.eager import context as tfpycontext
+
+ tf_state = tf.random.get_global_generator()
+ temp_gen = tf.random.Generator.from_seed(seed)
+ tf.random.set_global_generator(temp_gen)
+
+ if not tf.executing_eagerly():
+ raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
+
+ tf_context = tfpycontext.context() # eager mode context
+ tf_seed = tf_context._seed
+ tf_rng_initialized = hasattr(tf_context, "_rng")
+ if tf_rng_initialized:
+ tf_rng = tf_context._rng
+ tf_context._set_global_seed(seed)
+
+ try:
+ yield
+ finally:
+ np.random.set_state(np_state)
+
+ if set_pytorch and config.TORCH_AVAILABLE:
+ torch.random.set_rng_state(torch_state)
+ if torch.cuda.is_available():
+ torch.cuda.set_rng_state_all(torch_cuda_states)
+
+ if set_tensorflow and config.TF_AVAILABLE:
+ tf.random.set_global_generator(tf_state)
+
+ tf_context._seed = tf_seed
+ if tf_rng_initialized:
+ tf_context._rng = tf_rng
+ else:
+ delattr(tf_context, "_rng")
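+
+
+# A minimal usage sketch (added comment, not part of the original module):
+#
+#   with temp_seed(42):
+#       a = np.random.rand(3)
+#   with temp_seed(42):
+#       b = np.random.rand(3)
+#   assert np.array_equal(a, b)  # same seed -> same draws; the outer RNG state is restored afterwards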
+
+
+def unique_values(values):
+ """Iterate over iterable and return only unique values in order."""
+ seen = set()
+ for value in values:
+ if value not in seen:
+ seen.add(value)
+ yield value
+
+
+def no_op_if_value_is_null(func):
+ """If the value is None, return None, else call `func`."""
+
+ def wrapper(value):
+ return func(value) if value is not None else None
+
+ return wrapper
+
+
+def first_non_null_value(iterable):
+ """Return the index and the value of the first non-null value in the iterable. If all values are None, return -1 as index."""
+ for i, value in enumerate(iterable):
+ if value is not None:
+ return i, value
+ return -1, None
+
+
+def zip_dict(*dicts):
+ """Iterate over items of dictionaries grouped by their keys."""
+ for key in unique_values(itertools.chain(*dicts)): # merge the keys of all dicts, preserving order
+ # Will raise a KeyError if the dicts don't all have the same keys
+ yield key, tuple(d[key] for d in dicts)
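+
+
+# Illustrative example (added comment, not part of the original module):
+#
+#   >>> dict(zip_dict({"a": 1, "b": 2}, {"a": 10, "b": 20}))
+#   {'a': (1, 10), 'b': (2, 20)}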
+
+
+class NonMutableDict(dict):
+ """Dict where keys can only be added but not modified.
+
+ Will raise an error if the user tries to overwrite an existing key. The error message
+ can be customized during construction. It will be formatted using {key} for
+ the overwritten key.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._error_msg = kwargs.pop(
+ "error_msg",
+ "Try to overwrite existing key: {key}",
+ )
+ if kwargs:
+ raise ValueError("NonMutableDict cannot be initialized with kwargs.")
+ super().__init__(*args, **kwargs)
+
+ def __setitem__(self, key, value):
+ if key in self:
+ raise ValueError(self._error_msg.format(key=key))
+ return super().__setitem__(key, value)
+
+ def update(self, other):
+ if any(k in self for k in other):
+ raise ValueError(self._error_msg.format(key=set(self) & set(other)))
+ return super().update(other)
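+
+
+# Illustrative example (added comment, not part of the original module):
+#
+#   >>> d = NonMutableDict()
+#   >>> d["split"] = "train"
+#   >>> d["split"] = "test"
+#   Traceback (most recent call last):
+#       ...
+#   ValueError: Try to overwrite existing key: split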
+
+
+class classproperty(property): # pylint: disable=invalid-name
+ """Descriptor to be used as decorator for @classmethods."""
+
+ def __get__(self, obj, objtype=None):
+ return self.fget.__get__(None, objtype)()
+
+
+def _single_map_nested(args):
+ """Apply a function recursively to each element of a nested data struct."""
+ function, data_struct, batched, batch_size, types, rank, disable_tqdm, desc = args
+
+ # Singleton first to spare some computation
+ if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
+ return function(data_struct)
+ if (
+ batched
+ and not isinstance(data_struct, dict)
+ and isinstance(data_struct, types)
+ and all(not isinstance(v, types) for v in data_struct)
+ ):
+ return [mapped_item for batch in iter_batched(data_struct, batch_size) for mapped_item in function(batch)]
+
+ # Reduce logging to keep things readable in multiprocessing with tqdm
+ if rank is not None and logging.get_verbosity() < logging.WARNING:
+ logging.set_verbosity_warning()
+ # Print at least one thing to fix tqdm in notebooks in multiprocessing
+ # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308
+ if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__):
+ print(" ", end="", flush=True)
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct
+ pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc
+ with hf_tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) as pbar:
+ if isinstance(data_struct, dict):
+ return {
+ k: _single_map_nested((function, v, batched, batch_size, types, None, True, None)) for k, v in pbar
+ }
+ else:
+ mapped = [_single_map_nested((function, v, batched, batch_size, types, None, True, None)) for v in pbar]
+ if isinstance(data_struct, list):
+ return mapped
+ elif isinstance(data_struct, tuple):
+ return tuple(mapped)
+ else:
+ return np.array(mapped)
+
+
+def map_nested(
+ function: Callable[[Any], Any],
+ data_struct: Any,
+ dict_only: bool = False,
+ map_list: bool = True,
+ map_tuple: bool = False,
+ map_numpy: bool = False,
+ num_proc: Optional[int] = None,
+ parallel_min_length: int = 2,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ types: Optional[tuple] = None,
+ disable_tqdm: bool = True,
+ desc: Optional[str] = None,
+) -> Any:
+ """Apply a function recursively to each element of a nested data struct.
+
+ Use multiprocessing if num_proc > 1 and the length of data_struct is greater than or equal to
+ `parallel_min_length`.
+
+ Changed in version 2.5.0: before 2.5.0, multiprocessing was not used if `num_proc` was greater than or equal
+ to ``len(iterable)``. Now, if `num_proc` is greater than or equal to ``len(iterable)``, `num_proc` is set to
+ ``len(iterable)`` and multiprocessing is used.
+
+ Args:
+ function (`Callable`): Function to be applied to `data_struct`.
+ data_struct (`Any`): Data structure to apply `function` to.
+ dict_only (`bool`, default `False`): Whether to only apply `function` recursively to `dict` values in
+ `data_struct`.
+ map_list (`bool`, default `True`): Whether to also apply `function` recursively to `list` elements (besides
+ `dict` values).
+ map_tuple (`bool`, default `False`): Whether to also apply `function` recursively to `tuple` elements (besides
+ `dict` values).
+ map_numpy (`bool`, default `False`): Whether to also apply `function` recursively to `numpy.array` elements
+ (besides `dict` values).
+ num_proc (`int`, *optional*): Number of processes.
+ The level in the data struct used for multiprocessing is the first level that has smaller sub-structs,
+ starting from the root.
+ parallel_min_length (`int`, default `2`): Minimum length of `data_struct` required for parallel
+ processing.
+ batched (`bool`, defaults to `False`): Whether to provide batches of items to `function` instead of
+ individual items.
+ batch_size (`int`, *optional*, defaults to `1000`): Number of items per batch provided to `function` if
+ `batched=True`. If `batch_size <= 0` or `batch_size == None`, provide the full iterable as a single batch
+ to `function`.
+ types (`tuple`, *optional*): Additional types (besides `dict` values) to apply `function` recursively to their
+ elements.
+ disable_tqdm (`bool`, default `True`): Whether to disable the tqdm progressbar.
+ desc (`str`, *optional*): Prefix for the tqdm progressbar.
+
+ Returns:
+ `Any`
+ """
+ if types is None:
+ types = []
+ if not dict_only:
+ if map_list:
+ types.append(list)
+ if map_tuple:
+ types.append(tuple)
+ if map_numpy:
+ types.append(np.ndarray)
+ types = tuple(types)
+
+ # Singleton
+ if not isinstance(data_struct, dict) and not isinstance(data_struct, types):
+ if batched:
+ data_struct = [data_struct]
+ mapped = function(data_struct)
+ if batched:
+ mapped = mapped[0]
+ return mapped
+
+ iterable = list(data_struct.values()) if isinstance(data_struct, dict) else data_struct
+
+ if num_proc is None:
+ num_proc = 1
+ if any(isinstance(v, types) and len(v) > len(iterable) for v in iterable):
+ mapped = [
+ map_nested(
+ function=function,
+ data_struct=obj,
+ num_proc=num_proc,
+ parallel_min_length=parallel_min_length,
+ batched=batched,
+ batch_size=batch_size,
+ types=types,
+ )
+ for obj in iterable
+ ]
+ elif num_proc != -1 and num_proc <= 1 or len(iterable) < parallel_min_length:
+ if batched:
+ if batch_size is None or batch_size <= 0:
+ batch_size = max(len(iterable) // num_proc + int(len(iterable) % num_proc > 0), 1)
+ iterable = list(iter_batched(iterable, batch_size))
+ mapped = [
+ _single_map_nested((function, obj, batched, batch_size, types, None, True, None))
+ for obj in hf_tqdm(iterable, disable=disable_tqdm, desc=desc)
+ ]
+ if batched:
+ mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
+ else:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ message=".* is experimental and might be subject to breaking changes in the future\\.$",
+ category=UserWarning,
+ )
+ if batched:
+ if batch_size is None or batch_size <= 0:
+ batch_size = len(iterable) // num_proc + int(len(iterable) % num_proc > 0)
+ iterable = list(iter_batched(iterable, batch_size))
+ mapped = parallel_map(
+ function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, _single_map_nested
+ )
+ if batched:
+ mapped = [mapped_item for mapped_batch in mapped for mapped_item in mapped_batch]
+
+ if isinstance(data_struct, dict):
+ return dict(zip(data_struct.keys(), mapped))
+ else:
+ if isinstance(data_struct, list):
+ return mapped
+ elif isinstance(data_struct, tuple):
+ return tuple(mapped)
+ else:
+ return np.array(mapped)
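+
+
+# Illustrative examples (added comment, not part of the original module):
+#
+#   >>> map_nested(lambda x: x * 2, {"train": [1, 2], "test": [3]})
+#   {'train': [2, 4], 'test': [6]}
+#   >>> map_nested(str.upper, "abc")  # singletons are passed to `function` directly
+#   'ABC'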
+
+
+class NestedDataStructure:
+ def __init__(self, data=None):
+ self.data = data if data is not None else []
+
+ def flatten(self, data=None):
+ data = data if data is not None else self.data
+ if isinstance(data, dict):
+ return self.flatten(list(data.values()))
+ elif isinstance(data, (list, tuple)):
+ return [flattened for item in data for flattened in self.flatten(item)]
+ else:
+ return [data]
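+
+
+# Illustrative example (added comment, not part of the original module):
+#
+#   >>> NestedDataStructure({"train": ["a.txt", "b.txt"], "test": "c.txt"}).flatten()
+#   ['a.txt', 'b.txt', 'c.txt']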
+
+
+def has_sufficient_disk_space(needed_bytes, directory="."):
+ try:
+ free_bytes = disk_usage(os.path.abspath(directory)).free
+ except OSError:
+ return True
+ return needed_bytes < free_bytes
+
+
+def _convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
+ """Convert a link to a file on a github repo in a link to the raw github object."""
+ parsed = urlparse(url_path)
+ sub_directory = None
+ if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
+ if "blob" in url_path:
+ if not url_path.endswith(".py"):
+ raise ValueError(f"External import from github at {url_path} should point to a file ending with '.py'")
+ url_path = url_path.replace("blob", "raw") # Point to the raw file
+ else:
+ # Parse github url to point to zip
+ github_path = parsed.path[1:]
+ repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
+ repo_owner, repo_name = repo_info.split("/")
+ url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
+ sub_directory = f"{repo_name}-{branch}"
+ return url_path, sub_directory
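+
+
+# Illustrative examples with hypothetical URLs (added comment, not part of the original module):
+#
+#   >>> _convert_github_url("https://github.com/user/repo/blob/main/utils.py")
+#   ('https://github.com/user/repo/raw/main/utils.py', None)
+#   >>> _convert_github_url("https://github.com/user/repo/tree/main")
+#   ('https://github.com/user/repo/archive/main.zip', 'repo-main')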
+
+
+def lock_importable_file(importable_local_file: str) -> FileLock:
+ # Check the directory with a unique name in our dataset folder
+ # path is: ./datasets/dataset_name/hash_from_code/script.py
+ # we use a hash as subdirectory_name to be able to have multiple versions of a dataset/metric processing file together
+ importable_directory_path = str(Path(importable_local_file).resolve().parent.parent)
+ lock_path = importable_directory_path + ".lock"
+ return FileLock(lock_path)
+
+
+def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
+ """Find whether we should import or clone additional files for a given processing script.
+ And list the import.
+
+ We allow:
+ - library dependencies,
+ - local dependencies and
+ - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.
+ external dependencies will be downloaded (and extracted if needed in the dataset folder).
+ We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
+
+ Note that only direct import in the dataset processing script will be handled
+ We don't recursively explore the additional import to download further files.
+
+ Example::
+
+ import tensorflow
+ import .c4_utils
+ import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
+ """
+ lines = []
+ with open(file_path, encoding="utf-8") as f:
+ lines.extend(f.readlines())
+
+ logger.debug(f"Checking {file_path} for additional imports.")
+ imports: List[Tuple[str, str, str, Optional[str]]] = []
+ is_in_docstring = False
+ for line in lines:
+ docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
+
+ if len(docstr_start_match) == 1:
+ # flip True <=> False only if a docstring
+ # opens or closes on this line without doing both
+ is_in_docstring = not is_in_docstring
+
+ if is_in_docstring:
+ # import statements in docstrings should
+ # not be added as required dependencies
+ continue
+
+ match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
+ if match is None:
+ match = re.match(
+ r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
+ line,
+ flags=re.MULTILINE,
+ )
+ if match is None:
+ continue
+ if match.group(1):
+ # The import starts with a '.', we will download the relevant file
+ if any(imp[1] == match.group(2) for imp in imports):
+ # We already have this import
+ continue
+ if match.group(3):
+ # The import has a comment with 'From:', we'll retrieve it from the given url
+ url_path = match.group(3)
+ url_path, sub_directory = _convert_github_url(url_path)
+ imports.append(("external", match.group(2), url_path, sub_directory))
+ elif match.group(2):
+ # The import should be at the same place as the file
+ imports.append(("internal", match.group(2), match.group(2), None))
+ else:
+ if match.group(3):
+ # The import has a comment with `From: git+https:...`, which asks the user to pip install from git.
+ url_path = match.group(3)
+ imports.append(("library", match.group(2), url_path, None))
+ else:
+ imports.append(("library", match.group(2), match.group(2), None))
+
+ return imports
+
+
+def copyfunc(func):
+ result = types.FunctionType(func.__code__, func.__globals__, func.__name__, func.__defaults__, func.__closure__)
+ result.__kwdefaults__ = func.__kwdefaults__
+ return result
+
+
+Y = TypeVar("Y")
+
+
+def _write_generator_to_queue(queue: queue.Queue, func: Callable[..., Iterable[Y]], kwargs: dict) -> int:
+ for i, result in enumerate(func(**kwargs)):
+ queue.put(result)
+ return i
+
+
+def _get_pool_pid(pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool]) -> Set[int]:
+ return {f.pid for f in pool._pool}
+
+
+def iflatmap_unordered(
+ pool: Union[multiprocessing.pool.Pool, multiprocess.pool.Pool],
+ func: Callable[..., Iterable[Y]],
+ *,
+ kwargs_iterable: Iterable[dict],
+) -> Iterable[Y]:
+ initial_pool_pid = _get_pool_pid(pool)
+ pool_changed = False
+ manager_cls = Manager if isinstance(pool, multiprocessing.pool.Pool) else multiprocess.Manager
+ with manager_cls() as manager:
+ queue = manager.Queue()
+ async_results = [
+ pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_iterable
+ ]
+ try:
+ while True:
+ try:
+ yield queue.get(timeout=0.05)
+ except Empty:
+ if all(async_result.ready() for async_result in async_results) and queue.empty():
+ break
+ if _get_pool_pid(pool) != initial_pool_pid:
+ pool_changed = True
+ # One of the subprocesses has died. We should not wait forever.
+ raise RuntimeError(
+ "One of the subprocesses has abruptly died during map operation."
+ "To debug the error, disable multiprocessing."
+ )
+ finally:
+ if not pool_changed:
+ # we get the result in case there's an error to raise
+ [async_result.get(timeout=0.05) for async_result in async_results]
+
+
+T = TypeVar("T")
+
+
+def iter_batched(iterable: Iterable[T], n: int) -> Iterable[List[T]]:
+ if n < 1:
+ raise ValueError(f"Invalid batch size {n}")
+ batch = []
+ for item in iterable:
+ batch.append(item)
+ if len(batch) == n:
+ yield batch
+ batch = []
+ if batch:
+ yield batch
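+
+
+# Illustrative example (added comment, not part of the original module):
+#
+#   >>> list(iter_batched(range(5), 2))
+#   [[0, 1], [2, 3], [4]]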
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/readme.py b/venv/lib/python3.10/site-packages/datasets/utils/readme.py
new file mode 100644
index 0000000000000000000000000000000000000000..66ed087f7d67181c6840179fa634e8b8e4238f85
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/readme.py
@@ -0,0 +1,277 @@
+# loading package files: https://stackoverflow.com/a/20885799
+import importlib.resources as pkg_resources
+import logging
+from pathlib import Path
+from typing import Any, List, Tuple
+
+import yaml
+
+from . import resources
+from .deprecation_utils import deprecated
+
+
+BASE_REF_URL = "https://github.com/huggingface/datasets/tree/main/src/datasets/utils"
+this_url = f"{BASE_REF_URL}/{__file__}"
+logger = logging.getLogger(__name__)
+
+
+def load_yaml_resource(resource: str) -> Tuple[Any, str]:
+ content = pkg_resources.read_text(resources, resource)
+ return yaml.safe_load(content), f"{BASE_REF_URL}/resources/{resource}"
+
+
+readme_structure, known_readme_structure_url = load_yaml_resource("readme_structure.yaml")
+
+FILLER_TEXT = [
+ "[Needs More Information]",
+ "[More Information Needed]",
+ "(https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)",
+]
+
+# Dictionary representation of section/readme, error_list, warning_list
+ReadmeValidatorOutput = Tuple[dict, List[str], List[str]]
+
+
+class Section:
+ def __init__(self, name: str, level: str, lines: List[str] = None, suppress_parsing_errors: bool = False):
+ self.name = name
+ self.level = level
+ self.lines = lines
+ self.text = ""
+ self.is_empty_text = True
+ self.content = {}
+ self.parsing_error_list = []
+ self.parsing_warning_list = []
+ if self.lines is not None:
+ self.parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def parse(self, suppress_parsing_errors: bool = False):
+ current_sub_level = ""
+ current_lines = []
+ code_start = False
+ for line in self.lines:
+ if line.strip(" \n") == "":
+ continue
+ elif line.strip(" \n")[:3] == "```":
+ code_start = not code_start
+ elif line.split()[0] == self.level + "#" and not code_start:
+ if current_sub_level != "":
+ self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
+ current_lines = []
+ else:
+ if current_lines != []:
+ self.text += "".join(current_lines).strip()
+ if self.text != "" and self.text not in FILLER_TEXT:
+ self.is_empty_text = False
+ current_lines = []
+
+ current_sub_level = " ".join(line.split()[1:]).strip(" \n")
+ else:
+ current_lines.append(line)
+ else:
+ if current_sub_level != "":
+ if current_sub_level in self.content:
+ self.parsing_error_list.append(
+ f"Multiple sections with the same heading `{current_sub_level}` have been found. Please keep only one of these sections."
+ )
+ self.content[current_sub_level] = Section(current_sub_level, self.level + "#", current_lines)
+ else:
+ if current_lines != []:
+ self.text += "".join(current_lines).strip()
+ if self.text != "" and self.text not in FILLER_TEXT:
+ self.is_empty_text = False
+
+ if self.level == "" and not suppress_parsing_errors:
+ if self.parsing_error_list != [] or self.parsing_warning_list != []:
+ errors = "\n".join("-\t" + x for x in self.parsing_error_list + self.parsing_warning_list)
+ error_string = f"The following issues were found while parsing the README at `{self.name}`:\n" + errors
+ raise ValueError(error_string)
+
+ def validate(self, structure: dict) -> ReadmeValidatorOutput:
+ """Validates a Section class object recursively using the structure provided as a dictionary.
+
+ Args:
+ structure (:obj:`dict`): The dictionary representing the expected structure.
+
+ Returns:
+ :obj: `ReadmeValidatorOutput`: The dictionary representation of the section, and the errors.
+ """
+ # Header text validation
+ error_list = []
+ warning_list = []
+ if structure["allow_empty"] is False:
+ # If content is expected
+ if self.is_empty_text and self.content == {}:
+ # If no content is found, mention it in the error_list
+ error_list.append(f"Expected some content in section `{self.name}` but it is empty.")
+
+ if structure["allow_empty_text"] is False:
+ # If some text is expected
+ if self.is_empty_text:
+ # If no text is found, mention it in the error_list
+ error_list.append(
+ f"Expected some text in section `{self.name}` but it is empty (text in subsections are ignored)."
+ )
+ # Subsections Validation
+ if structure["subsections"] is not None:
+ # If subsections are expected
+ if self.content == {}:
+ # If no subsections are present
+ values = [subsection["name"] for subsection in structure["subsections"]]
+ # Mention the expected values in the error_list
+ error_list.append(
+ f"Section `{self.name}` expected the following subsections: {', '.join(['`'+x+'`' for x in values])}. Found 'None'."
+ )
+ else:
+ # If some subsections are present
+ structure_names = [subsection["name"] for subsection in structure["subsections"]]
+ has_missing_subsections = False
+ for idx, name in enumerate(structure_names):
+ if name not in self.content:
+ # If the expected subsection is not present
+ error_list.append(f"Section `{self.name}` is missing subsection: `{name}`.")
+ has_missing_subsections = True
+ else:
+ # If the subsection is present, validate subsection, return the result
+ # and concat the errors from subsection to section error_list
+
+ # Skip sublevel validation if current level is `###`
+ if self.level == "###":
+ continue
+ else:
+ _, subsec_error_list, subsec_warning_list = self.content[name].validate(
+ structure["subsections"][idx]
+ )
+ error_list += subsec_error_list
+ warning_list += subsec_warning_list
+
+ if has_missing_subsections: # we only allow to have extra subsections if all the other ones are here
+ for name in self.content:
+ if name not in structure_names:
+ # If an extra subsection is present
+ warning_list.append(
+ f"`{self.name}` has an extra subsection: `{name}`. Skipping further validation checks for this subsection as expected structure is unknown."
+ )
+ if error_list:
+ # If there are errors, do not return the dictionary as it is invalid
+ return {}, error_list, warning_list
+ else:
+ return self.to_dict(), error_list, warning_list
+
+ def to_dict(self) -> dict:
+ """Returns the dictionary representation of a section."""
+ return {
+ "name": self.name,
+ "text": self.text,
+ "is_empty_text": self.is_empty_text,
+ "subsections": [value.to_dict() for value in self.content.values()],
+ }
+
+
+@deprecated("Use `huggingface_hub.DatasetCard` instead.")
+class ReadMe(Section): # Level 0
+ def __init__(self, name: str, lines: List[str], structure: dict = None, suppress_parsing_errors: bool = False):
+ super().__init__(name=name, level="") # Not using lines here as we need to use a child class parse
+ self.structure = structure
+ self.yaml_tags_line_count = -2
+ self.tag_count = 0
+ self.lines = lines
+ if self.lines is not None:
+ self.parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def validate(self):
+ if self.structure is None:
+ content, error_list, warning_list = self._validate(readme_structure)
+ else:
+ content, error_list, warning_list = self._validate(self.structure)
+ if error_list != [] or warning_list != []:
+ errors = "\n".join(["-\t" + x for x in error_list + warning_list])
+ error_string = f"The following issues were found for the README at `{self.name}`:\n" + errors
+ raise ValueError(error_string)
+
+ @classmethod
+ def from_readme(cls, path: Path, structure: dict = None, suppress_parsing_errors: bool = False):
+ with open(path, encoding="utf-8") as f:
+ lines = f.readlines()
+ return cls(path, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
+
+ @classmethod
+ def from_string(
+ cls, string: str, structure: dict = None, root_name: str = "root", suppress_parsing_errors: bool = False
+ ):
+ lines = string.split("\n")
+ return cls(root_name, lines, structure, suppress_parsing_errors=suppress_parsing_errors)
+
+ def parse(self, suppress_parsing_errors: bool = False):
+ # Skip Tags
+ line_count = 0
+
+ for line in self.lines:
+ self.yaml_tags_line_count += 1
+ if line.strip(" \n") == "---":
+ self.tag_count += 1
+ if self.tag_count == 2:
+ break
+ line_count += 1
+ if self.tag_count == 2:
+ self.lines = self.lines[line_count + 1 :] # Skip everything up to and including the closing "---" line.
+ else:
+ self.lines = self.lines[self.tag_count :]
+ super().parse(suppress_parsing_errors=suppress_parsing_errors)
+
+ def __str__(self):
+ """Returns the string of dictionary representation of the ReadMe."""
+ return str(self.to_dict())
+
+ def _validate(self, readme_structure):
+ error_list = []
+ warning_list = []
+ if self.yaml_tags_line_count == 0:
+ warning_list.append("Empty YAML markers are present in the README.")
+ elif self.tag_count == 0:
+ warning_list.append("No YAML markers are present in the README.")
+ elif self.tag_count == 1:
+ warning_list.append("Only the start of YAML tags present in the README.")
+ # Check how many first level sections are present.
+ num_first_level_keys = len(self.content.keys())
+ if num_first_level_keys > 1:
+ # If more than one, add to the error list, continue
+ error_list.append(
+ f"The README has several first-level headings: {', '.join(['`'+x+'`' for x in list(self.content.keys())])}. Only one heading is expected. Skipping further validation for this README."
+ )
+ elif num_first_level_keys < 1:
+ # If less than one, append error.
+ error_list.append(
+ "The README has no first-level headings. One heading is expected. Skipping further validation for this README."
+ )
+
+ else:
+ # If one exactly
+ start_key = list(self.content.keys())[0] # Get the key
+ if start_key.startswith("Dataset Card for"): # Check correct start
+ # If the starting is correct, validate all the sections
+ _, sec_error_list, sec_warning_list = self.content[start_key].validate(
+ readme_structure["subsections"][0]
+ )
+ error_list += sec_error_list
+ warning_list += sec_warning_list
+ else:
+ # If not found, append error
+ error_list.append(
+ "No first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
+ )
+ if error_list:
+ # If there are errors, do not return the dictionary as it is invalid
+ return {}, error_list, warning_list
+ else:
+ return self.to_dict(), error_list, warning_list
+
+
+if __name__ == "__main__":
+ from argparse import ArgumentParser
+
+ ap = ArgumentParser(usage="Validate the content (excluding YAML tags) of a README.md file.")
+ ap.add_argument("readme_filepath")
+ args = ap.parse_args()
+ readme_filepath = Path(args.readme_filepath)
+ readme = ReadMe.from_readme(readme_filepath)
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/stratify.py b/venv/lib/python3.10/site-packages/datasets/utils/stratify.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0967aa1abb790f741af5ff920c67e615d1b01da
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/stratify.py
@@ -0,0 +1,107 @@
+import numpy as np
+
+
+def approximate_mode(class_counts, n_draws, rng):
+ """Computes approximate mode of multivariate hypergeometric.
+ This is an approximation to the mode of the multivariate
+ hypergeometric given by class_counts and n_draws.
+ It shouldn't be off by more than one.
+ It is the most likely outcome of drawing n_draws
+ samples from the population given by class_counts.
+ Args
+ ----------
+ class_counts : ndarray of int
+ Population per class.
+ n_draws : int
+ Number of draws (samples to draw) from the overall population.
+ rng : random state
+ Used to break ties.
+ Returns
+ -------
+ sampled_classes : ndarray of int
+ Number of samples drawn from each class.
+ np.sum(sampled_classes) == n_draws
+
+ """
+ # this computes a bad approximation to the mode of the
+ # multivariate hypergeometric given by class_counts and n_draws
+ continuous = n_draws * class_counts / class_counts.sum()
+ # floored means we don't overshoot n_samples, but probably undershoot
+ floored = np.floor(continuous)
+ # we add samples according to how much "left over" probability
+ # they had, until we arrive at n_samples
+ need_to_add = int(n_draws - floored.sum())
+ if need_to_add > 0:
+ remainder = continuous - floored
+ values = np.sort(np.unique(remainder))[::-1]
+ # add according to remainder, but break ties
+ # randomly to avoid biases
+ for value in values:
+ (inds,) = np.where(remainder == value)
+ # if we need_to_add less than what's in inds
+ # we draw randomly from them.
+ # if we need to add more, we add them all and
+ # go to the next value
+ add_now = min(len(inds), need_to_add)
+ inds = rng.choice(inds, size=add_now, replace=False)
+ floored[inds] += 1
+ need_to_add -= add_now
+ if need_to_add == 0:
+ break
+ return floored.astype(np.int64)
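+
+
+# Illustrative example (added comment, not part of the original module):
+#
+#   >>> rng = np.random.default_rng(0)
+#   >>> approximate_mode(np.array([40, 40, 20]), n_draws=10, rng=rng)
+#   array([4, 4, 2])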
+
+
+def stratified_shuffle_split_generate_indices(y, n_train, n_test, rng, n_splits=10):
+ """
+
+ Provides train/test indices to split data in train/test sets.
+ It's reference is taken from StratifiedShuffleSplit implementation
+ of scikit-learn library.
+
+ Args
+ ----------
+
+ n_train : int,
+ represents the absolute number of train samples.
+
+ n_test : int,
+ represents the absolute number of test samples.
+
+ random_state : int or RandomState instance, default=None
+ Controls the randomness of the training and testing indices produced.
+ Pass an int for reproducible output across multiple function calls.
+
+ n_splits : int, default=10
+ Number of re-shuffling & splitting iterations.
+ """
+ classes, y_indices = np.unique(y, return_inverse=True)
+ n_classes = classes.shape[0]
+ class_counts = np.bincount(y_indices)
+ if np.min(class_counts) < 2:
+ raise ValueError("Minimum class count error")
+ if n_train < n_classes:
+ raise ValueError(
+ "The train_size = %d should be greater or " "equal to the number of classes = %d" % (n_train, n_classes)
+ )
+ if n_test < n_classes:
+ raise ValueError(
+ "The test_size = %d should be greater or " "equal to the number of classes = %d" % (n_test, n_classes)
+ )
+ class_indices = np.split(np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1])
+ for _ in range(n_splits):
+ n_i = approximate_mode(class_counts, n_train, rng)
+ class_counts_remaining = class_counts - n_i
+ t_i = approximate_mode(class_counts_remaining, n_test, rng)
+
+ train = []
+ test = []
+
+ for i in range(n_classes):
+ permutation = rng.permutation(class_counts[i])
+ perm_indices_class_i = class_indices[i].take(permutation, mode="clip")
+ train.extend(perm_indices_class_i[: n_i[i]])
+ test.extend(perm_indices_class_i[n_i[i] : n_i[i] + t_i[i]])
+ train = rng.permutation(train)
+ test = rng.permutation(test)
+
+ yield train, test
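+
+
+# A minimal usage sketch (added comment, not part of the original module):
+#
+#   y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
+#   rng = np.random.RandomState(42)
+#   splits = stratified_shuffle_split_generate_indices(y, n_train=4, n_test=2, rng=rng, n_splits=1)
+#   train_idx, test_idx = next(splits)
+#   # both index arrays preserve the 50/50 class ratio of `y`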
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/tf_utils.py b/venv/lib/python3.10/site-packages/datasets/utils/tf_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b69f5c85b2c38bb47506a4b2fedb5a69e1d37c00
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/tf_utils.py
@@ -0,0 +1,582 @@
+# Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""TF-specific utils import."""
+
+import os
+import warnings
+from functools import partial
+from math import ceil
+from uuid import uuid4
+
+import numpy as np
+import pyarrow as pa
+from multiprocess import get_context
+
+
+try:
+ from multiprocess.shared_memory import SharedMemory
+except ImportError:
+ SharedMemory = None # Version checks should prevent this being called on older Python versions
+
+from .. import config
+
+
+def minimal_tf_collate_fn(features):
+ if isinstance(features, dict): # case batch_size=None: nothing to collate
+ return features
+ elif config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ first = features[0]
+ batch = {}
+ for k, v in first.items():
+ if isinstance(v, np.ndarray):
+ batch[k] = np.stack([f[k] for f in features])
+ elif isinstance(v, tf.Tensor):
+ batch[k] = tf.stack([f[k] for f in features])
+ else:
+ batch[k] = np.array([f[k] for f in features])
+ return batch
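+
+
+# A minimal usage sketch (added comment, assumes TensorFlow is installed; not part of the original module):
+#
+#   features = [{"x": np.array([1, 2]), "label": 0}, {"x": np.array([3, 4]), "label": 1}]
+#   batch = minimal_tf_collate_fn(features)
+#   # batch["x"] has shape (2, 2) and batch["label"] == np.array([0, 1])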
+
+
+def minimal_tf_collate_fn_with_renaming(features):
+ batch = minimal_tf_collate_fn(features)
+ if "label" in batch:
+ batch["labels"] = batch["label"]
+ del batch["label"]
+ return batch
+
+
+def is_numeric_pa_type(pa_type):
+ if pa.types.is_list(pa_type):
+ return is_numeric_pa_type(pa_type.value_type)
+ return pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type)
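+
+
+# Illustrative examples (added comment, not part of the original module):
+#
+#   >>> is_numeric_pa_type(pa.int64())
+#   True
+#   >>> is_numeric_pa_type(pa.list_(pa.float32()))
+#   True
+#   >>> is_numeric_pa_type(pa.string())
+#   False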
+
+
+def is_numeric_feature(feature):
+ from .. import ClassLabel, Sequence, Value
+ from ..features.features import _ArrayXD
+
+ if isinstance(feature, Sequence):
+ return is_numeric_feature(feature.feature)
+ elif isinstance(feature, list):
+ return is_numeric_feature(feature[0])
+ elif isinstance(feature, _ArrayXD):
+ return is_numeric_pa_type(feature().storage_dtype)
+ elif isinstance(feature, Value):
+ return is_numeric_pa_type(feature())
+ elif isinstance(feature, ClassLabel):
+ return True
+ else:
+ return False
+
+
+def np_get_batch(
+ indices, dataset, cols_to_retain, collate_fn, collate_fn_args, columns_to_np_types, return_dict=False
+):
+ if not isinstance(indices, np.ndarray):
+ indices = indices.numpy()
+
+ is_batched = True
+ # Optimization - if we're loading a sequential batch, do it with slicing instead of a list of indices
+ if isinstance(indices, np.integer):
+ batch = dataset[indices.item()]
+ is_batched = False
+ elif np.all(np.diff(indices) == 1):
+ batch = dataset[indices[0] : indices[-1] + 1]
+ elif isinstance(indices, np.ndarray):
+ batch = dataset[indices]
+ else:
+ raise RuntimeError("Unexpected type for indices: {}".format(type(indices)))
+
+ if cols_to_retain is not None:
+ batch = {
+ key: value
+ for key, value in batch.items()
+ if key in cols_to_retain or key in ("label", "label_ids", "labels")
+ }
+
+ if is_batched:
+ actual_size = len(list(batch.values())[0]) # Get the length of one of the arrays, assume all same
+ # Our collators expect a list of dicts, not a dict of lists/arrays, so we invert
+ batch = [{key: value[i] for key, value in batch.items()} for i in range(actual_size)]
+ batch = collate_fn(batch, **collate_fn_args)
+
+ if return_dict:
+ out_batch = {}
+ for col, cast_dtype in columns_to_np_types.items():
+ # In case the collate_fn returns something strange
+ array = np.array(batch[col])
+ array = array.astype(cast_dtype)
+ out_batch[col] = array
+ else:
+ out_batch = []
+ for col, cast_dtype in columns_to_np_types.items():
+ # In case the collate_fn returns something strange
+ array = np.array(batch[col])
+ array = array.astype(cast_dtype)
+ out_batch.append(array)
+ return out_batch
+
+
+def dataset_to_tf(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+):
+ """Create a tf.data.Dataset from the underlying Dataset. This is a single-process method - the multiprocess
+ equivalent is multiprocess_dataset_to_tf.
+
+ Args:
+ dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
+ cols_to_retain (`List[str]`): Dataset column(s) to load in the
+ tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
+ that do not exist in the original dataset.
+ collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`. Can be empty.
+ columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
+ output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
+ `tf.TensorSpec` objects.
+ shuffle (`bool`): Shuffle the dataset order when loading. Recommended True for training, False for
+ validation/evaluation.
+ batch_size (`int`, default `None`): Size of batches to load from the dataset. Defaults to `None`, which implies that
+ the dataset won't be batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ drop_remainder (`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
+ defaults to the same setting as shuffle.
+
+ Returns:
+ `tf.data.Dataset`
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ # TODO Matt: When our minimum Python version is 3.8 or higher, we can delete all of this and move everything
+ # to the NumPy multiprocessing path.
+ if hasattr(tf, "random_index_shuffle"):
+ random_index_shuffle = tf.random_index_shuffle
+ elif hasattr(tf.random.experimental, "index_shuffle"):
+ random_index_shuffle = tf.random.experimental.index_shuffle
+ else:
+ if len(dataset) > 10_000_000:
+ warnings.warn(
+ "to_tf_dataset() can be memory-inefficient on versions of TensorFlow older than 2.9. "
+ "If you are iterating over a dataset with a very large number of samples, consider "
+ "upgrading to TF >= 2.9."
+ )
+ random_index_shuffle = None
+
+ getter_fn = partial(
+ np_get_batch,
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ return_dict=False,
+ )
+
+ # This works because dictionaries always output in the same order
+ tout = [tf.dtypes.as_dtype(dtype) for dtype in columns_to_np_types.values()]
+
+ @tf.function(input_signature=[tf.TensorSpec(None, tf.int64)])
+ def fetch_function(indices):
+ output = tf.py_function(
+ getter_fn,
+ inp=[indices],
+ Tout=tout,
+ )
+ return {key: output[i] for i, key in enumerate(columns_to_np_types.keys())}
+
+ tf_dataset = tf.data.Dataset.range(len(dataset))
+
+ if shuffle and random_index_shuffle is not None:
+ base_seed = tf.fill((3,), value=tf.cast(-1, dtype=tf.int64))
+
+ def scan_random_index(state, index):
+ if tf.reduce_all(state == -1):
+ # This generates a new random seed once per epoch only,
+ # to ensure that we iterate over each sample exactly once per epoch
+ state = tf.random.uniform(shape=(3,), maxval=2**62, dtype=tf.int64)
+ shuffled_index = random_index_shuffle(index=index, seed=state, max_index=len(dataset) - 1)
+ return state, shuffled_index
+
+ tf_dataset = tf_dataset.scan(base_seed, scan_random_index)
+ elif shuffle:
+ tf_dataset = tf_dataset.shuffle(tf_dataset.cardinality())
+
+ if batch_size is not None:
+ tf_dataset = tf_dataset.batch(batch_size, drop_remainder=drop_remainder)
+
+ tf_dataset = tf_dataset.map(fetch_function)
+
+ if batch_size is not None:
+
+ def ensure_shapes(input_dict):
+ return {key: tf.ensure_shape(val, output_signature[key].shape) for key, val in input_dict.items()}
+
+ else:
+ # Ensure shape but remove batch dimension of output_signature[key].shape
+ def ensure_shapes(input_dict):
+ return {key: tf.ensure_shape(val, output_signature[key].shape[1:]) for key, val in input_dict.items()}
+
+ return tf_dataset.map(ensure_shapes)
+
+
+class SharedMemoryContext:
+ # This is a context manager for creating shared memory that ensures cleanup happens even if a process is interrupted
+ # The process that creates shared memory is always the one responsible for unlinking it in the end
+ def __init__(self):
+ self.created_shms = []
+ self.opened_shms = []
+
+ def get_shm(self, name, size, create):
+ shm = SharedMemory(size=int(size), name=name, create=create)
+ if create:
+ # We only unlink the ones we created in this context
+ self.created_shms.append(shm)
+ else:
+ # If we didn't create it, we only close it when done, we don't unlink it
+ self.opened_shms.append(shm)
+ return shm
+
+ def get_array(self, name, shape, dtype, create):
+ shm = self.get_shm(name=name, size=np.prod(shape) * np.dtype(dtype).itemsize, create=create)
+ return np.ndarray(shape, dtype=dtype, buffer=shm.buf)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ for shm in self.created_shms:
+ shm.close()
+ shm.unlink()
+ for shm in self.opened_shms:
+ shm.close()
+
+
+class NumpyMultiprocessingGenerator:
+ def __init__(
+ self,
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+ num_workers,
+ ):
+ self.dataset = dataset
+ self.cols_to_retain = cols_to_retain
+ self.collate_fn = collate_fn
+ self.collate_fn_args = collate_fn_args
+ self.string_columns = [col for col, dtype in columns_to_np_types.items() if dtype in (np.unicode_, np.str_)]
+ # Strings will be converted to arrays of single unicode chars, so that we can have a constant itemsize
+ self.columns_to_np_types = {
+ col: dtype if col not in self.string_columns else np.dtype("U1")
+ for col, dtype in columns_to_np_types.items()
+ }
+ self.output_signature = output_signature
+ self.shuffle = shuffle
+ self.batch_size = batch_size
+ self.drop_remainder = drop_remainder
+ self.num_workers = num_workers
+ # Because strings are converted to characters, we need to add one extra dimension to the shape
+ self.columns_to_ranks = {
+ col: int(spec.shape.rank) if col not in self.string_columns else int(spec.shape.rank) + 1
+ for col, spec in output_signature.items()
+ }
+
+ def __iter__(self):
+ # Make sure we only spawn workers if they have work to do
+ num_workers = min(self.num_workers, int(ceil(len(self.dataset) / self.batch_size)))
+ # Do the shuffling in iter so that it's done at the start of each epoch
+ per_worker_batches, final_batch, final_batch_worker = self.distribute_batches(
+ self.dataset, self.batch_size, self.drop_remainder, num_workers, self.shuffle
+ )
+ ctx = get_context("spawn")
+ names = []
+ shape_arrays = []
+ workers = []
+ array_ready_events = [ctx.Event() for _ in range(num_workers)]
+ array_loaded_events = [ctx.Event() for _ in range(num_workers)]
+
+ base_args = {
+ "dataset": self.dataset,
+ "cols_to_retain": self.cols_to_retain,
+ "collate_fn": self.collate_fn,
+ "collate_fn_args": self.collate_fn_args,
+ "columns_to_np_types": self.columns_to_np_types,
+ "columns_to_ranks": self.columns_to_ranks,
+ "string_columns": self.string_columns,
+ }
+ with SharedMemoryContext() as shm_ctx:
+ for i in range(num_workers):
+ worker_random_id = str(uuid4())
+ worker_name = f"dw_{i}_{worker_random_id}"[:10]
+ names.append(worker_name)
+
+ worker_shape_arrays = {
+ col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=True)
+ for col, rank in self.columns_to_ranks.items()
+ }
+ shape_arrays.append(worker_shape_arrays)
+
+ worker_indices = per_worker_batches[i]
+ if i == final_batch_worker and final_batch is not None:
+ final_batch_arg = final_batch
+ else:
+ final_batch_arg = None
+ worker_kwargs = {
+ "worker_name": worker_name,
+ "indices": worker_indices,
+ "extra_batch": final_batch_arg,
+ "array_ready_event": array_ready_events[i],
+ "array_loaded_event": array_loaded_events[i],
+ **base_args,
+ }
+ worker = ctx.Process(target=self.worker_loop, kwargs=worker_kwargs, daemon=True)
+ worker.start()
+ workers.append(worker)
+
+ end_signal_received = False
+ while not end_signal_received:
+ for i in range(num_workers):
+ if not array_ready_events[i].wait(timeout=60):
+ raise TimeoutError("Data loading worker timed out!")
+ array_ready_events[i].clear()
+ array_shapes = shape_arrays[i]
+ if any(np.any(shape < 0) for shape in array_shapes.values()):
+ # Child processes send negative array shapes to indicate
+ # that no more data is going to be sent
+ end_signal_received = True
+ break
+ # Matt: Because array shapes are variable we recreate the shared memory each iteration.
+ # I suspect repeatedly opening lots of shared memory is the bottleneck for the parent process.
+ # A future optimization, at the cost of some code complexity, could be to reuse shared memory
+ # between iterations, but this would require knowing in advance the maximum size, or having
+ # a system to only create a new memory block when a new maximum size is seen.
+ # Another potential optimization would be to figure out which memory copies are necessary,
+ # or whether we can yield objects straight out of shared memory.
+ with SharedMemoryContext() as batch_shm_ctx:
+ # This memory context only lasts long enough to copy everything out of the batch
+ arrays = {
+ col: batch_shm_ctx.get_array(
+ f"{names[i]}_{col}",
+ shape=shape,
+ dtype=self.columns_to_np_types[col],
+ create=False,
+ )
+ for col, shape in array_shapes.items()
+ }
+ # Copy everything out of shm because the memory
+ # will be unlinked by the child process at some point
+ arrays = {col: np.copy(arr) for col, arr in arrays.items()}
+ # Now we convert any unicode char arrays to strings
+ for string_col in self.string_columns:
+ arrays[string_col] = (
+ arrays[string_col].view(f"U{arrays[string_col].shape[-1]}").squeeze(-1)
+ )
+ yield arrays
+ array_loaded_events[i].set()
+ # Now we just do some cleanup
+ # Shared memory is cleaned up by the context manager, so we just make sure workers finish
+ for worker in workers:
+ worker.join()
+
+ def __call__(self):
+ return self
+
+ @staticmethod
+ def worker_loop(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ columns_to_ranks,
+ string_columns,
+ indices,
+ extra_batch,
+ worker_name,
+ array_ready_event,
+ array_loaded_event,
+ ):
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ tf.config.set_visible_devices([], "GPU") # Make sure workers don't try to allocate GPU memory
+
+ def send_batch_to_parent(indices):
+ batch = np_get_batch(
+ indices=indices,
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ return_dict=True,
+ )
+
+ # Now begins the fun part where we start shovelling shared memory at the parent process
+ out_arrays = {}
+ with SharedMemoryContext() as batch_shm_ctx:
+ # The batch shared memory context exists only as long as it takes for the parent process
+ # to read everything, after which it cleans everything up again
+ for col, cast_dtype in columns_to_np_types.items():
+ # Everything has to be np.array for this to work, even if the collate_fn is giving us tf.Tensor
+ array = batch[col]
+ if col in string_columns:
+ # We can't send unicode arrays over shared memory, so we convert to single chars ("U1")
+ # which have a fixed width of 4 bytes. The parent process will convert these back to strings.
+ array = array.view("U1").reshape(array.shape + (-1,))
+ shape_arrays[col][:] = array.shape
+ out_arrays[col] = batch_shm_ctx.get_array(
+ f"{worker_name}_{col}", shape=array.shape, dtype=cast_dtype, create=True
+ )
+ out_arrays[col][:] = array
+
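+            # Handshake with the parent: signal that the arrays are ready, then wait until the
+            # parent has copied them out before this shared memory is torn down and reused.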
+ array_ready_event.set()
+ array_loaded_event.wait()
+ array_loaded_event.clear()
+
+ with SharedMemoryContext() as shm_ctx:
+ shape_arrays = {
+ col: shm_ctx.get_array(f"{worker_name}_{col}_shape", shape=(rank,), dtype=np.int64, create=False)
+ for col, rank in columns_to_ranks.items()
+ }
+
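+        # Each element of `indices` is one batch: a 1D array of `batch_size` sample indices
+        # assigned to this worker by `distribute_batches`.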
+ for batch in indices:
+ send_batch_to_parent(batch)
+ if extra_batch is not None:
+ send_batch_to_parent(extra_batch)
+ # Now we send a batsignal to the parent process that we're done
+ for col, array in shape_arrays.items():
+ array[:] = -1
+ array_ready_event.set()
+
+ @staticmethod
+ def distribute_batches(dataset, batch_size, drop_remainder, num_workers, shuffle):
+ indices = np.arange(len(dataset))
+ if shuffle:
+ np.random.shuffle(indices)
+ num_samples = len(indices)
+ # We distribute the batches so that reading from the workers in round-robin order yields the exact
+ # order specified in indices. This is only important when shuffle is False, but we do it regardless.
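+ # For example, with indices 0..9, batch_size=2 and num_workers=2, the batches are
+ # [0,1], [2,3], [4,5], [6,7], [8,9]; worker 0 receives [0,1], [4,5] and [8,9], and worker 1
+ # receives [2,3] and [6,7], so reading one batch from each worker in turn reproduces
+ # the original batch order.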
+ incomplete_batch_cutoff = num_samples - (num_samples % batch_size)
+ indices, last_incomplete_batch = np.split(indices, [incomplete_batch_cutoff])
+ if drop_remainder or len(last_incomplete_batch) == 0:
+ last_incomplete_batch = None
+
+ indices = indices.reshape(-1, batch_size)
+ num_batches = len(indices)
+ final_batches_cutoff = num_batches - (num_batches % num_workers)
+ indices, final_batches = np.split(indices, [final_batches_cutoff])
+ indices = indices.reshape(-1, num_workers, batch_size)
+
+ per_worker_indices = np.split(indices, indices.shape[1], axis=1)
+ per_worker_indices = [np.squeeze(worker_indices, 1) for worker_indices in per_worker_indices]
+ # Distribute the final batches to the first workers
+ for i in range(len(final_batches)):
+ # len(final_batches) can be zero, and is always less than num_workers
+ per_worker_indices[i] = np.concatenate([per_worker_indices[i], final_batches[i].reshape(1, -1)], axis=0)
+ # Add the last incomplete batch to the next worker, which might be the first worker
+ if last_incomplete_batch is not None:
+ incomplete_batch_worker_idx = len(final_batches)
+ else:
+ incomplete_batch_worker_idx = None
+ return per_worker_indices, last_incomplete_batch, incomplete_batch_worker_idx
+
+
+def multiprocess_dataset_to_tf(
+ dataset,
+ cols_to_retain,
+ collate_fn,
+ collate_fn_args,
+ columns_to_np_types,
+ output_signature,
+ shuffle,
+ batch_size,
+ drop_remainder,
+ num_workers,
+):
+ """Create a tf.data.Dataset from the underlying Dataset. This is a multi-process method - the single-process
+ equivalent is dataset_to_tf.
+
+ Args:
+ dataset (`Dataset`): Dataset to wrap with tf.data.Dataset.
+ cols_to_retain (`List[str]`): Dataset column(s) to load in the
+ tf.data.Dataset. It is acceptable to include column names that are created by the `collate_fn` and
+ that do not exist in the original dataset.
+ collate_fn (`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`. Can be empty.
+ columns_to_np_types (`Dict[str, np.dtype]`): A `dict` mapping column names to numpy dtypes.
+ output_signature (`Dict[str, tf.TensorSpec]`): A `dict` mapping column names to
+ `tf.TensorSpec` objects.
+ shuffle (`bool`): Shuffle the dataset order when loading. Recommended `True` for training, `False` for
+ validation/evaluation.
+ batch_size (`int`): Size of batches to load from the dataset. Unlike the single-process `dataset_to_tf`,
+ this must be an integer; the multiprocess generator needs a fixed batch size to distribute batches across workers.
+ drop_remainder (`bool`, default `None`): Drop the last incomplete batch when loading. If not provided,
+ defaults to the same setting as shuffle.
+ num_workers (`int`): Number of workers to use for loading the dataset. Should be >= 1.
+
+ Returns:
+ `tf.data.Dataset`
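+
+ Example:
+
+ In practice this function is usually driven by `Dataset.to_tf_dataset(..., num_workers=...)` rather than
+ called directly. The snippet below is a minimal sketch, assuming an existing `Dataset` named `ds` with a
+ fixed-length `input_ids` column and a placeholder `collate_fn` that stacks those features:
+
+ ```py
+ import numpy as np
+ import tensorflow as tf
+
+ def collate_fn(features):
+     # Placeholder collator: stack pre-padded integer features into one array per column
+     return {"input_ids": np.stack([example["input_ids"] for example in features])}
+
+ tf_ds = multiprocess_dataset_to_tf(
+     dataset=ds,  # assumed to be an existing `datasets.Dataset`
+     cols_to_retain=["input_ids"],
+     collate_fn=collate_fn,
+     collate_fn_args={},
+     columns_to_np_types={"input_ids": np.int64},
+     output_signature={"input_ids": tf.TensorSpec(shape=(None, None), dtype=tf.int64)},
+     shuffle=True,
+     batch_size=8,
+     drop_remainder=True,
+     num_workers=2,
+ )
+ ```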
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ data_generator = NumpyMultiprocessingGenerator(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+
+ tf_dataset = tf.data.Dataset.from_generator(data_generator, output_signature=output_signature)
+ if drop_remainder:
+ dataset_length = int(len(dataset) // batch_size)
+ else:
+ dataset_length = int(ceil(len(dataset) / batch_size))
+ return tf_dataset.apply(tf.data.experimental.assert_cardinality(dataset_length))
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/tqdm.py b/venv/lib/python3.10/site-packages/datasets/utils/tqdm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e28a8ff7ccfda7d4fd9a6195636d181f285ceb65
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/tqdm.py
@@ -0,0 +1,131 @@
+"""Utility helpers to handle progress bars in `datasets`.
+
+Example:
+ 1. Use `datasets.utils.tqdm` as you would use `tqdm.tqdm` or `tqdm.auto.tqdm`.
+ 2. To disable progress bars, either use `disable_progress_bars()` helper or set the
+ environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` to 1.
+ 3. To re-enable progress bars, use `enable_progress_bars()`.
+ 4. To check whether progress bars are disabled, use `are_progress_bars_disabled()`.
+
+NOTE: The environment variable `HF_DATASETS_DISABLE_PROGRESS_BARS` takes priority.
+
+Example:
+ ```py
+ from datasets.utils import (
+ are_progress_bars_disabled,
+ disable_progress_bars,
+ enable_progress_bars,
+ tqdm,
+ )
+
+ # Disable progress bars globally
+ disable_progress_bars()
+
+ # Use as normal `tqdm`
+ for _ in tqdm(range(5)):
+ do_something()
+
+ # Still not showing progress bars, as `disable=False` is overwritten to `True`.
+ for _ in tqdm(range(5), disable=False):
+ do_something()
+
+ are_progress_bars_disabled() # True
+
+ # Re-enable progress bars globally
+ enable_progress_bars()
+
+ # Progress bar will be shown!
+ for _ in tqdm(range(5)):
+ do_something()
+ ```
+"""
+
+import warnings
+
+from tqdm.auto import tqdm as old_tqdm
+
+from ..config import HF_DATASETS_DISABLE_PROGRESS_BARS
+
+
+# `HF_DATASETS_DISABLE_PROGRESS_BARS` is `Optional[bool]` while `_hf_datasets_progress_bars_disabled`
+# is a `bool`. If `HF_DATASETS_DISABLE_PROGRESS_BARS` is set to True or False, it has priority.
+# If `HF_DATASETS_DISABLE_PROGRESS_BARS` is None, it means the user has not set the
+# environment variable and is free to enable/disable progress bars programmatically.
+# TL;DR: env variable has priority over code.
+#
+# By default, progress bars are enabled.
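+#
+# For example:
+#   HF_DATASETS_DISABLE_PROGRESS_BARS=1  -> bars stay disabled; `enable_progress_bars()` only warns.
+#   HF_DATASETS_DISABLE_PROGRESS_BARS=0  -> bars stay enabled; `disable_progress_bars()` only warns.
+#   variable unset                       -> fully controlled by the enable/disable helpers below.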
+_hf_datasets_progress_bars_disabled: bool = HF_DATASETS_DISABLE_PROGRESS_BARS or False
+
+
+def disable_progress_bars() -> None:
+ """
+ Globally disable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
+ variable has been set.
+
+ Use [`~utils.enable_progress_bars`] to re-enable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is False:
+ warnings.warn(
+ "Cannot disable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=0` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = True
+
+
+def enable_progress_bars() -> None:
+ """
+ Globally enable progress bars used in `datasets`, except if the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment
+ variable has been set.
+
+ Use [`~utils.disable_progress_bars`] to disable them.
+ """
+ if HF_DATASETS_DISABLE_PROGRESS_BARS is True:
+ warnings.warn(
+ "Cannot enable progress bars: environment variable `HF_DATASETS_DISABLE_PROGRESS_BAR=1` is set and has"
+ " priority."
+ )
+ return
+ global _hf_datasets_progress_bars_disabled
+ _hf_datasets_progress_bars_disabled = False
+
+
+def are_progress_bars_disabled() -> bool:
+ """Return whether progress bars are globally disabled or not.
+
+ Progress bars used in `datasets` can be enabled or disabled globally using [`~utils.enable_progress_bars`]
+ and [`~utils.disable_progress_bars`], or by setting the `HF_DATASETS_DISABLE_PROGRESS_BARS` environment variable.
+ """
+ global _hf_datasets_progress_bars_disabled
+ return _hf_datasets_progress_bars_disabled
+
+
+class tqdm(old_tqdm):
+ """
+ Class to override `disable` argument in case progress bars are globally disabled.
+
+ Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+ """
+
+ def __init__(self, *args, **kwargs):
+ if are_progress_bars_disabled():
+ kwargs["disable"] = True
+ super().__init__(*args, **kwargs)
+
+ def __delattr__(self, attr: str) -> None:
+ """Fix for https://github.com/huggingface/datasets/issues/6066"""
+ try:
+ super().__delattr__(attr)
+ except AttributeError:
+ if attr != "_lock":
+ raise
+
+
+# backward compatibility
+enable_progress_bar = enable_progress_bars
+disable_progress_bar = disable_progress_bars
+
+
+def is_progress_bar_enabled():
+ return not are_progress_bars_disabled()
diff --git a/venv/lib/python3.10/site-packages/datasets/utils/typing.py b/venv/lib/python3.10/site-packages/datasets/utils/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad087fc98d2a6de2d3e493120135fc9ea49e605
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/datasets/utils/typing.py
@@ -0,0 +1,9 @@
+import os
+from typing import Dict, List, Tuple, TypeVar, Union
+
+
+T = TypeVar("T")
+
+ListLike = Union[List[T], Tuple[T, ...]]
+NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
+PathLike = Union[str, bytes, os.PathLike]
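+
+# For example, a hypothetical helper that accepts either a single path or a nested structure of
+# paths could be annotated as:
+#
+#     def resolve_paths(paths: NestedDataStructureLike[PathLike]) -> List[str]: ...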