diff --git a/ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..dcde70b2e66e4b004543e5c51a023f29825ef25a --- /dev/null +++ b/ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e0cc75fbea31f6affdffe947d5642a916d442590c4c918531411264e555d1be +size 16778396 diff --git a/ckpts/universal/global_step120/zero/20.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/20.attention.dense.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..80beade520f0536bd26936e8f348e0fc3649e044 --- /dev/null +++ b/ckpts/universal/global_step120/zero/20.attention.dense.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e754b97c41c0c16a0c8cc0c6304f726937cab0b397ef7d66be8e31c9a0e16ad0 +size 16778317 diff --git a/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..030f1bea69571c2c0a5692a4bae7a738c250414d --- /dev/null +++ b/ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:483990bec3315475e0bfeea968847d56821d682df456cb937136d24c3f5334be +size 33555627 diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eddfdfbc2bec7237d12190c4859d9860ed27b5e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab287f1bd857ea562219a6c386a0e604458d273d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..021e6686c9848559249d2ccae090d61b3c311fcf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d88ac3de8d15bc0101325b0f0f9a677d15034ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4739f7aeed24c89cddb54f895f3db85f745a45cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf4618926610a59f8cff6bf0923b6ea93e77163f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac6082c7eff00ba863d0d17146b6198146e18fff Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2eefe42a8f33290e4d56adf530709ee5f736e6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d632b5ea9f51fd8076826ae78199bad027b9b5b7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..666f0b5a807e66ad7211755a74a58781df89c94c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f07d14b549be36f6dfa18b39f4bc23151e9c177f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be9af30ac7e8c717d543d9f4e6f34617dc225955 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08df97761d56746b140f9ed80e6538dbcb050c17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bde578126447193f06521eb3dd760b756c847aa5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3dc8bf2a30266c7316fd294f6c20153b626c509c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2f88a9068ed32b58c23966fdbbc1bfe142ea474 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..103dc59bee34ad556c69fc7ef7f0caa2e14f84e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c50d341f548c1b84ff990cee485f7241130d792 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eca0509de5df71a6c797f8d31b90c7b5c34369e6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e610e0b96713e5a1b8dd154160ef99dc06d4b48 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b523241d4bff005ed92d207ed2e4f663ad093768 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..a22a5fd177c452c91bf1dd46d887f25602c86042 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c5a6fb866dc770cbc7bce8b77e819b02865f11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/__init__.py @@ -0,0 +1,76 @@ +# TODO(VitalyFedyunin): Rearranging this imports leads to crash, +# need to cleanup dependencies and fix it +from torch.utils.data.sampler import ( + BatchSampler, + RandomSampler, + Sampler, + SequentialSampler, + SubsetRandomSampler, + WeightedRandomSampler, +) +from torch.utils.data.dataset import ( + ChainDataset, + ConcatDataset, + Dataset, + IterableDataset, + StackDataset, + Subset, + TensorDataset, + random_split, +) +from torch.utils.data.datapipes.datapipe import ( + DFIterDataPipe, + DataChunk, + IterDataPipe, + MapDataPipe, +) +from torch.utils.data.dataloader import ( + DataLoader, + _DatasetKind, + get_worker_info, + default_collate, + default_convert, +) +from torch.utils.data.distributed import DistributedSampler +from torch.utils.data.datapipes._decorator import ( + argument_validation, + functional_datapipe, + guaranteed_datapipes_determinism, + non_deterministic, + runtime_validation, + runtime_validation_disabled, +) + +__all__ = ['BatchSampler', + 'ChainDataset', + 'ConcatDataset', + 'DFIterDataPipe', + 'DataChunk', + 'DataLoader', + 'Dataset', + 'DistributedSampler', + 'IterDataPipe', + 'IterableDataset', + 'MapDataPipe', + 'RandomSampler', + 'Sampler', + 'SequentialSampler', + 'StackDataset', + 'Subset', + 'SubsetRandomSampler', + 'TensorDataset', + 'WeightedRandomSampler', + '_DatasetKind', + 'argument_validation', + 'default_collate', + 'default_convert', + 'functional_datapipe', + 'get_worker_info', + 'guaranteed_datapipes_determinism', + 'non_deterministic', + 'random_split', + 'runtime_validation', + 'runtime_validation_disabled'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py b/venv/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..be97f016a0917a771970843a4ba70deb68cdd60d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py @@ -0,0 +1,5 @@ +import warnings + +def worker_init_fn(worker_id): + warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated" + " as DataLoader automatically applies sharding in every worker") diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/dataloader.py b/venv/lib/python3.10/site-packages/torch/utils/data/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..f18bb602b50da3a23f9521ac003efbdeade2794d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/dataloader.py @@ -0,0 +1,1479 @@ +r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter. + +To support these two classes, in `./_utils` we define many utility methods and +functions to be run in multiprocessing. E.g., the data loading worker loop is +in `./_utils/worker.py`. 
+""" + +import functools +import itertools +import logging +import os +import queue +import threading +import warnings + +from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union + +import multiprocessing as python_multiprocessing +import torch +import torch.distributed as dist +import torch.multiprocessing as multiprocessing +import torch.utils.data.graph_settings + +from torch._utils import ExceptionWrapper + +from . import ( + IterDataPipe, + MapDataPipe, + IterableDataset, + Sampler, + SequentialSampler, + RandomSampler, + BatchSampler, + Dataset,) + +from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper + +from . import _utils + +__all__ = [ + "DataLoader", + "get_worker_info", + "default_collate", + "default_convert", +] + +T_co = TypeVar('T_co', covariant=True) +T = TypeVar('T') +_worker_init_fn_t = Callable[[int], None] + +# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that +# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'. +# See https://github.com/python/mypy/issues/3737. +_collate_fn_t = Callable[[List[T]], Any] + + +# These functions used to be defined in this file. However, it was moved to +# _utils/collate.py. Although it is rather hard to access this from user land +# (one has to explicitly directly `import torch.utils.data.dataloader`), there +# probably is user code out there using it. This aliasing maintains BC in this +# aspect. +default_collate: _collate_fn_t = _utils.collate.default_collate +default_convert = _utils.collate.default_convert + +get_worker_info = _utils.worker.get_worker_info + +logger = logging.getLogger(__name__) + + +class _DatasetKind: + Map = 0 + Iterable = 1 + + @staticmethod + def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last): + if kind == _DatasetKind.Map: + return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + else: + return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + + +class _InfiniteConstantSampler(Sampler): + r"""Analogous to ``itertools.repeat(None, None)``. + + Used as sampler for :class:`~torch.utils.data.IterableDataset`. 
+ """ + + def __iter__(self): + while True: + yield None + + +def _get_distributed_settings(): + if dist.is_available() and dist.is_initialized(): + return dist.get_world_size(), dist.get_rank() + else: + return 1, 0 + + +def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id): + global_worker_id = worker_id + info = torch.utils.data.get_worker_info() + assert info is not None + total_workers = info.num_workers + datapipe = info.dataset + assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) + # To distribute elements across distributed process evenly, we should shard data on distributed + # processes first then shard on worker processes + total_workers *= world_size + global_worker_id = global_worker_id * world_size + rank_id + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id) + if worker_init_fn is not None: + worker_init_fn(worker_id) + + +def _share_dist_seed(generator, pg): + _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator) + if isinstance(pg, dist.ProcessGroup): + dist.broadcast(_shared_seed, src=0, group=pg) + return _shared_seed.item() + + +class DataLoader(Generic[T_co]): + r""" + Data loader combines a dataset and a sampler, and provides an iterable over the given dataset. + + The :class:`~torch.utils.data.DataLoader` supports both map-style and + iterable-style datasets with single- or multi-process loading, customizing + loading order and optional automatic batching (collation) and memory pinning. + + See :py:mod:`torch.utils.data` documentation page for more details. + + Args: + dataset (Dataset): dataset from which to load the data. + batch_size (int, optional): how many samples per batch to load + (default: ``1``). + shuffle (bool, optional): set to ``True`` to have the data reshuffled + at every epoch (default: ``False``). + sampler (Sampler or Iterable, optional): defines the strategy to draw + samples from the dataset. Can be any ``Iterable`` with ``__len__`` + implemented. If specified, :attr:`shuffle` must not be specified. + batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but + returns a batch of indices at a time. Mutually exclusive with + :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`, + and :attr:`drop_last`. + num_workers (int, optional): how many subprocesses to use for data + loading. ``0`` means that the data will be loaded in the main process. + (default: ``0``) + collate_fn (Callable, optional): merges a list of samples to form a + mini-batch of Tensor(s). Used when using batched loading from a + map-style dataset. + pin_memory (bool, optional): If ``True``, the data loader will copy Tensors + into device/CUDA pinned memory before returning them. If your data elements + are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type, + see the example below. + drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If ``False`` and + the size of dataset is not divisible by the batch size, then the last batch + will be smaller. (default: ``False``) + timeout (numeric, optional): if positive, the timeout value for collecting a batch + from workers. Should always be non-negative. 
(default: ``0``) + worker_init_fn (Callable, optional): If not ``None``, this will be called on each + worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as + input, after seeding and before data loading. (default: ``None``) + multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If + ``None``, the default `multiprocessing context`_ of your operating system will + be used. (default: ``None``) + generator (torch.Generator, optional): If not ``None``, this RNG will be used + by RandomSampler to generate random indexes and multiprocessing to generate + ``base_seed`` for workers. (default: ``None``) + prefetch_factor (int, optional, keyword-only arg): Number of batches loaded + in advance by each worker. ``2`` means there will be a total of + 2 * num_workers batches prefetched across all workers. (default value depends + on the set value for num_workers. If value of num_workers=0 default is ``None``. + Otherwise, if value of ``num_workers > 0`` default is ``2``). + persistent_workers (bool, optional): If ``True``, the data loader will not shut down + the worker processes after a dataset has been consumed once. This allows to + maintain the workers `Dataset` instances alive. (default: ``False``) + pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is + ``True``. + + + .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` + cannot be an unpicklable object, e.g., a lambda function. See + :ref:`multiprocessing-best-practices` on more details related + to multiprocessing in PyTorch. + + .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used. + When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`, + it instead returns an estimate based on ``len(dataset) / batch_size``, with proper + rounding depending on :attr:`drop_last`, regardless of multi-process loading + configurations. This represents the best guess PyTorch can make because PyTorch + trusts user :attr:`dataset` code in correctly handling multi-process + loading to avoid duplicate data. + + However, if sharding results in multiple workers having incomplete last batches, + this estimate can still be inaccurate, because (1) an otherwise complete batch can + be broken into multiple ones and (2) more than one batch worth of samples can be + dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such + cases in general. + + See `Dataset Types`_ for more details on these two types of datasets and how + :class:`~torch.utils.data.IterableDataset` interacts with + `Multi-process data loading`_. + + .. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and + :ref:`data-loading-randomness` notes for random seed related questions. + + .. 
_multiprocessing context: + https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods + """ + + dataset: Dataset[T_co] + batch_size: Optional[int] + num_workers: int + pin_memory: bool + drop_last: bool + timeout: float + sampler: Union[Sampler, Iterable] + pin_memory_device: str + prefetch_factor: Optional[int] + _iterator : Optional['_BaseDataLoaderIter'] + __initialized = False + + def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, + shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, + batch_sampler: Union[Sampler[List], Iterable[List], None] = None, + num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, + pin_memory: bool = False, drop_last: bool = False, + timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, + multiprocessing_context=None, generator=None, + *, prefetch_factor: Optional[int] = None, + persistent_workers: bool = False, + pin_memory_device: str = ""): + torch._C._log_api_usage_once("python.data_loader") + + if num_workers < 0: + raise ValueError('num_workers option should be non-negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + if num_workers == 0 and prefetch_factor is not None: + raise ValueError('prefetch_factor option could only be specified in multiprocessing.' + 'let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.') + elif num_workers > 0 and prefetch_factor is None: + prefetch_factor = 2 + elif prefetch_factor is not None and prefetch_factor < 0: + raise ValueError('prefetch_factor option should be non-negative') + + if persistent_workers and num_workers == 0: + raise ValueError('persistent_workers option needs num_workers > 0') + + self.dataset = dataset + self.num_workers = num_workers + self.prefetch_factor = prefetch_factor + self.pin_memory = pin_memory + self.pin_memory_device = pin_memory_device + self.timeout = timeout + self.worker_init_fn = worker_init_fn + self.multiprocessing_context = multiprocessing_context + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler + if isinstance(self.dataset, IterDataPipe): + self.dataset = _IterDataPipeSerializationWrapper(self.dataset) + elif isinstance(self.dataset, MapDataPipe): + self.dataset = _MapDataPipeSerializationWrapper(self.dataset) + + # Arg-check dataset related before checking samplers because we want to + # tell users that iterable-style datasets are incompatible with custom + # samplers first, so that they don't learn that this combo doesn't work + # after spending time fixing the custom sampler errors. + if isinstance(dataset, IterableDataset): + self._dataset_kind = _DatasetKind.Iterable + # NOTE [ Custom Samplers and IterableDataset ] + # + # `IterableDataset` does not support custom `batch_sampler` or + # `sampler` since the key is irrelevant (unless we support + # generator-style dataset one day...). + # + # For `sampler`, we always create a dummy sampler. 
This is an + # infinite sampler even when the dataset may have an implemented + # finite `__len__` because in multi-process data loading, naive + # settings will return duplicated data (which may be desired), and + # thus using a sampler with length matching that of dataset will + # cause data lost (you may have duplicates of the first couple + # batches, but never see anything afterwards). Therefore, + # `Iterabledataset` always uses an infinite sampler, an instance of + # `_InfiniteConstantSampler` defined above. + # + # A custom `batch_sampler` essentially only controls the batch size. + # However, it is unclear how useful it would be since an iterable-style + # dataset can handle that within itself. Moreover, it is pointless + # in multi-process data loading as the assignment order of batches + # to workers is an implementation detail so users can not control + # how to batchify each worker's iterable. Thus, we disable this + # option. If this turns out to be useful in future, we can re-enable + # this, and support custom samplers that specify the assignments to + # specific workers. + if isinstance(dataset, IterDataPipe): + if shuffle is not None: + dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default. + elif shuffle not in {False, None}: + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}") + + if sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}") + elif batch_sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + f"batch_sampler option, but got batch_sampler={batch_sampler}") + else: + shuffle = bool(shuffle) + self._dataset_kind = _DatasetKind.Map + + + + if sampler is not None and shuffle: + raise ValueError('sampler option is mutually exclusive with ' + 'shuffle') + + if batch_sampler is not None: + # auto_collation with custom batch_sampler + if batch_size != 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler option is mutually exclusive ' + 'with batch_size, shuffle, sampler, and ' + 'drop_last') + batch_size = None + drop_last = False + elif batch_size is None: + # no auto_collation + if drop_last: + raise ValueError('batch_size=None option disables auto-batching ' + 'and is mutually exclusive with drop_last') + + if sampler is None: # give default samplers + if self._dataset_kind == _DatasetKind.Iterable: + # See NOTE [ Custom Samplers and IterableDataset ] + sampler = _InfiniteConstantSampler() + else: # map-style + if shuffle: + sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type] + else: + sampler = SequentialSampler(dataset) # type: ignore[arg-type] + + if batch_size is not None and batch_sampler is None: + # auto_collation without custom batch_sampler + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.batch_size = batch_size + self.drop_last = drop_last + self.sampler = sampler + self.batch_sampler = batch_sampler + self.generator = generator + + if collate_fn is None: + if self._auto_collation: + collate_fn = _utils.collate.default_collate + else: + collate_fn = _utils.collate.default_convert + + self.collate_fn = collate_fn + self.persistent_workers = 
persistent_workers + + self.__initialized = True + self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ] + + self._iterator = None + + self.check_worker_number_rationality() + + torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined] + + def _get_iterator(self) -> '_BaseDataLoaderIter': + if self.num_workers == 0: + return _SingleProcessDataLoaderIter(self) + else: + self.check_worker_number_rationality() + return _MultiProcessingDataLoaderIter(self) + + @property + def multiprocessing_context(self): + return self.__multiprocessing_context + + @multiprocessing_context.setter + def multiprocessing_context(self, multiprocessing_context): + if multiprocessing_context is not None: + if self.num_workers > 0: + if isinstance(multiprocessing_context, str): + valid_start_methods = multiprocessing.get_all_start_methods() + if multiprocessing_context not in valid_start_methods: + raise ValueError( + 'multiprocessing_context option ' + f'should specify a valid start method in {valid_start_methods!r}, but got ' + f'multiprocessing_context={multiprocessing_context!r}') + multiprocessing_context = multiprocessing.get_context(multiprocessing_context) + + if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): + raise TypeError('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + f'multiprocessing_context={multiprocessing_context}') + else: + raise ValueError('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + f'num_workers={self.num_workers}') + + self.__multiprocessing_context = multiprocessing_context + + def __setattr__(self, attr, val): + if self.__initialized and attr in ( + 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'): + raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized') + + super().__setattr__(attr, val) + + # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up + # since '_BaseDataLoaderIter' references 'DataLoader'. + def __iter__(self) -> '_BaseDataLoaderIter': + # When using a single worker the returned iterator should be + # created everytime to avoid resetting its state + # However, in the case of a multiple workers iterator + # the iterator is only created once in the lifetime of the + # DataLoader object so that workers can be reused + if self.persistent_workers and self.num_workers > 0: + if self._iterator is None: + self._iterator = self._get_iterator() + else: + self._iterator._reset(self) + return self._iterator + else: + return self._get_iterator() + + @property + def _auto_collation(self): + return self.batch_sampler is not None + + @property + def _index_sampler(self): + # The actual sampler used for generating indices for `_DatasetFetcher` + # (see _utils/fetch.py) to read data at each time. This would be + # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise. + # We can't change `.sampler` and `.batch_sampler` attributes for BC + # reasons. + if self._auto_collation: + return self.batch_sampler + else: + return self.sampler + + def __len__(self) -> int: + if self._dataset_kind == _DatasetKind.Iterable: + # NOTE [ IterableDataset and __len__ ] + # + # For `IterableDataset`, `__len__` could be inaccurate when one naively + # does multi-processing data loading, since the samples will be duplicated. 
+ # However, no real use case should be actually using that behavior, so + # it should count as a user error. We should generally trust user + # code to do the proper thing (e.g., configure each replica differently + # in `__iter__`), and give us the correct `__len__` if they choose to + # implement it (this will still throw if the dataset does not implement + # a `__len__`). + # + # To provide a further warning, we track if `__len__` was called on the + # `DataLoader`, save the returned value in `self._len_called`, and warn + # if the iterator ends up yielding more than this number of samples. + + # Cannot statically verify that dataset is Sized + length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type] + if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler + from math import ceil + if self.drop_last: + length = length // self.batch_size + else: + length = ceil(length / self.batch_size) + return length + else: + return len(self._index_sampler) + + def check_worker_number_rationality(self): + # This function checks whether the dataloader's worker number is reasonable based on + # the current system's resources. The current rule is that if the number of workers this + # DataLoader will create is bigger than the number of logical CPUs it is allowed to + # use, then we emit a warning to let the user pay attention. + # + # e.g. If the current system has 2 physical CPUs with 16 cores each, and each core supports 2 + # threads, then the total number of logical CPUs here is 2 * 16 * 2 = 64. Let's say the current + # DataLoader process can use half of them, which is 32; then the reasonable max number of + # workers initiated from this process is 32. + # Now, let's say the created DataLoader has num_workers = 40, which is bigger than 32. + # So the warning message is triggered to notify the user to lower the worker number if + # necessary. + # + # + # [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is + # available (available on most Linux systems, but not on OSX and Windows). + # When os.sched_getaffinity is not available, os.cpu_count() is called instead, but + # it doesn't respect cpuset. + # We don't take threading into account since each worker process is single threaded + # at this time. + # + # We don't set any threading flags (eg. OMP_NUM_THREADS, MKL_NUM_THREADS, etc) + # other than setting `torch.set_num_threads` to 1 in the worker process; if the passed-in + # functions use 3rd party modules that rely on those threading flags to determine + # how many threads to create (eg. numpy, etc), then it is the caller's responsibility to + # set those flags correctly. + def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked): + + suggested_max_worker_msg = (( + "Our suggested max number of workers in the current system is {}{}, which is smaller " + "than what this DataLoader is going to create.").format( + num_worker_suggest, + ("" if cpuset_checked else " (`cpuset` is not taken into account)")) + ) if num_worker_suggest is not None else ( + "DataLoader is not able to compute a suggested max number of workers in the current system.") + + warn_msg = ( + "This DataLoader will create {} worker processes in total.
{} " + "Please be aware that excessive worker creation might get DataLoader running slow or even freeze, " + "lower the worker number to avoid potential slowness/freeze if necessary.").format( + num_worker_created, + suggested_max_worker_msg) + return warn_msg + + if not self.num_workers or self.num_workers == 0: + return + + # try to compute a suggested max number of worker based on system's resource + max_num_worker_suggest = None + cpuset_checked = False + if hasattr(os, 'sched_getaffinity'): + try: + max_num_worker_suggest = len(os.sched_getaffinity(0)) + cpuset_checked = True + except Exception: + pass + if max_num_worker_suggest is None: + # os.cpu_count() could return Optional[int] + # get cpu count first and check None in order to satisfy mypy check + cpu_count = os.cpu_count() + if cpu_count is not None: + max_num_worker_suggest = cpu_count + + if max_num_worker_suggest is None: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + return + + if self.num_workers > max_num_worker_suggest: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + + +class _BaseDataLoaderIter: + def __init__(self, loader: DataLoader) -> None: + self._dataset = loader.dataset + self._shared_seed = None + self._pg = None + if isinstance(self._dataset, IterDataPipe): + if dist.is_available() and dist.is_initialized(): + self._pg = dist.new_group(backend="gloo") + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + self._dataset_kind = loader._dataset_kind + self._IterableDataset_len_called = loader._IterableDataset_len_called + self._auto_collation = loader._auto_collation + self._drop_last = loader.drop_last + self._index_sampler = loader._index_sampler + self._num_workers = loader.num_workers + ws, rank = _get_distributed_settings() + self._world_size = ws + self._rank = rank + # for other backends, pin_memory_device need to set. if not set + # default behaviour is CUDA device. if pin_memory_device is selected + # and pin_memory is not set, the default behaviour false. 
+ if (len(loader.pin_memory_device) == 0): + self._pin_memory = loader.pin_memory and torch.cuda.is_available() + self._pin_memory_device = None + else: + if not loader.pin_memory: + warn_msg = ("pin_memory_device is set but the pin_memory flag is not used, so device pinned memory won't be used. " + "Please set pin_memory to true if you need to use the device pin memory.") + warnings.warn(warn_msg) + + self._pin_memory = loader.pin_memory + self._pin_memory_device = loader.pin_memory_device + self._timeout = loader.timeout + self._collate_fn = loader.collate_fn + self._sampler_iter = iter(self._index_sampler) + self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item() + self._persistent_workers = loader.persistent_workers + self._num_yielded = 0 + self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__" + + def __iter__(self) -> '_BaseDataLoaderIter': + return self + + def _reset(self, loader, first_iter=False): + self._sampler_iter = iter(self._index_sampler) + self._num_yielded = 0 + self._IterableDataset_len_called = loader._IterableDataset_len_called + if isinstance(self._dataset, IterDataPipe): + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + + def _next_index(self): + return next(self._sampler_iter) # may raise StopIteration + + def _next_data(self): + raise NotImplementedError + + def __next__(self) -> Any: + with torch.autograd.profiler.record_function(self._profile_name): + if self._sampler_iter is None: + # TODO(https://github.com/pytorch/pytorch/issues/76750) + self._reset() # type: ignore[call-arg] + data = self._next_data() + self._num_yielded += 1 + if self._dataset_kind == _DatasetKind.Iterable and \ + self._IterableDataset_len_called is not None and \ + self._num_yielded > self._IterableDataset_len_called: + warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} " + "samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called, + self._num_yielded) + if self._num_workers > 0: + warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the " + "IterableDataset replica at each worker. Please see " + "https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.") + warnings.warn(warn_msg) + return data + + def __len__(self) -> int: + return len(self._index_sampler) + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD.
+ # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError(f"{self.__class__.__name__} cannot be pickled") + + +class _SingleProcessDataLoaderIter(_BaseDataLoaderIter): + def __init__(self, loader): + super().__init__(loader) + assert self._timeout == 0 + assert self._num_workers == 0 + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # Taking care of distributed sharding + if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank) + + self._dataset_fetcher = _DatasetKind.create_fetcher( + self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last) + + def _next_data(self): + index = self._next_index() # may raise StopIteration + data = self._dataset_fetcher.fetch(index) # may raise StopIteration + if self._pin_memory: + data = _utils.pin_memory.pin_memory(data, self._pin_memory_device) + return data + + +class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter): + r"""Iterates once over the DataLoader's dataset, as specified by the sampler.""" + + # NOTE [ Data Loader Multiprocessing Shutdown Logic ] + # + # Preliminary: + # + # Our data model looks like this (queues are indicated with curly brackets): + # + # main process || + # | || + # {index_queue} || + # | || + # worker processes || DATA + # | || + # {worker_result_queue} || FLOW + # | || + # pin_memory_thread of main process || DIRECTION + # | || + # {data_queue} || + # | || + # data output \/ + # + # P.S. the `worker_result_queue` and `pin_memory_thread` part may be omitted if + # `pin_memory=False`. + # + # + # Terminating multiprocessing logic requires very careful design. In + # particular, we need to make sure that + # + # 1. The iterator gracefully exits the workers when its last reference is + # gone or it is depleted. + # + # In this case, the workers should be gracefully exited because the + # main process may still need to continue to run, and we want cleaning + # up code in the workers to be executed (e.g., releasing GPU memory). + # Naturally, we implement the shutdown logic in `__del__` of + # DataLoaderIterator. + # + # We delay the discussion on the logic in this case until later. + # + # 2. The iterator exits the workers when the loader process and/or worker + # processes exit normally or with an error. + # + # We set all workers and `pin_memory_thread` to have `daemon=True`. + # + # You may ask, why can't we make the workers non-daemonic, and + # gracefully exit using the same logic as we have in `__del__` when the + # iterator gets deleted (see 1 above)? + # + # First of all, `__del__` is **not** guaranteed to be called when + # the interpreter exits. Even if it is called, by the time it executes, + # many Python core library resources may already be freed, and even + # simple things like acquiring an internal lock of a queue may hang. + # Therefore, in this case, we actually need to prevent `__del__` from + # being executed, and rely on the automatic termination of daemonic + # children. + # + # Thus, we register an `atexit` hook that sets a global flag + # `_utils.python_exit_status`.
Since `atexit` hooks are executed in the + # reverse order of registration, we are guaranteed that this flag is + # set before library resources we use are freed (which, at least in + # CPython, is done via an `atexit` handler defined in + # `multiprocessing/util.py` + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362 + # registered when an object requiring this mechanism is first + # created, e.g., `mp.Queue` + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103 + # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29 + # ) + # + # So in `__del__`, we check if `_utils.python_exit_status` is set or + # `None` (freed), and perform a no-op if so. + # + # However, simply letting library clean-up code run can also be bad, + # because such code (i.e., `multiprocessing.util._exit_function()`) + # includes joining putting threads for `mp.Queue`, which can be blocking. + # Hence, the main process putting threads are called with + # `cancel_join_thread` at creation. See later section + # [ 3b. A process won't hang when putting into a queue; ] + # for more details. + # + # Here are two example cases where library clean-up code can run + # before `__del__` is called: + # + # 1. If we hold onto a reference to the iterator, it more often + # than not tries to do `multiprocessing` library cleaning before + # clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666) + # and thus prevents our cleaning-up code from running first. + # + # 2. A similar issue arises when a `DataLoader` is used in a subprocess. + # When a process ends, it shuts all its daemonic children + # down with a SIGTERM (instead of joining them without a timeout). + # Similarly for threads, but by a different mechanism. This fact, + # together with a few implementation details of multiprocessing, forces + # us to make workers daemonic. All of our problems arise when a + # DataLoader is used in a subprocess, and are caused by multiprocessing + # code which looks more or less like this: + # + # try: + # your_function_using_a_dataloader() + # finally: + # multiprocessing.util._exit_function() + # + # The joining/termination mentioned above happens inside + # `_exit_function()`. Now, if `your_function_using_a_dataloader()` + # throws, the stack trace stored in the exception will prevent the + # frame which uses `DataLoaderIter` from being freed. If the frame has any + # reference to the `DataLoaderIter` (e.g., in a method of the iter), + # its `__del__`, which starts the shutdown procedure, will not be + # called. That, in turn, means that workers aren't notified. Attempting + # to join in `_exit_function` will then result in a hang. + # + # For context, `_exit_function` is also registered as an `atexit` call. + # So it is unclear to me (@ssnl) why this is needed in a finally block. + # The code dates back to 2008 and there is no comment on the original + # PEP 371 or patch https://bugs.python.org/issue3050 (containing both + # the finally block and the `atexit` registration) that explains this. + # + # + # Finally, another choice is to just shut down workers with the logic in 1 + # above whenever we see an error in `next`. This isn't ideal because + # a. It prevents users from using try-catch to resume data loading. + # b. It doesn't prevent hanging if users have references to the + # iterator. + # + # 3.
All processes exit if any of them die unexpectedly by fatal signals. + # + # As shown above, the workers are set as daemonic children of the main + # process. However, automatic cleaning-up of such child processes only + # happens if the parent process exits gracefully (e.g., not via fatal + # signals like SIGKILL). So we must ensure that each process will exit + # even the process that should send/receive data to/from it were + # killed, i.e., + # + # a. A process won't hang when getting from a queue. + # + # Even with carefully designed data dependencies (i.e., a `put()` + # always corresponding to a `get()`), hanging on `get()` can still + # happen when data in queue is corrupted (e.g., due to + # `cancel_join_thread` or unexpected exit). + # + # For child exit, we set a timeout whenever we try to get data + # from `data_queue`, and check the workers' status on each timeout + # and error. + # See `_DataLoaderiter._get_batch()` and + # `_DataLoaderiter._try_get_data()` for details. + # + # Additionally, for child exit on non-Windows platforms, we also + # register a SIGCHLD handler (which is supported on Windows) on + # the main process, which checks if any of the workers fail in the + # (Python) handler. This is more efficient and faster in detecting + # worker failures, compared to only using the above mechanism. + # See `DataLoader.cpp` and `_utils/signal_handling.py` for details. + # + # For `.get()` calls where the sender(s) is not the workers, we + # guard them with timeouts, and check the status of the sender + # when timeout happens: + # + in the workers, the `_utils.worker.ManagerWatchdog` class + # checks the status of the main process. + # + if `pin_memory=True`, when getting from `pin_memory_thread`, + # check `pin_memory_thread` status periodically until `.get()` + # returns or see that `pin_memory_thread` died. + # + # b. A process won't hang when putting into a queue; + # + # We use `mp.Queue` which has a separate background thread to put + # objects from an unbounded buffer array. The background thread is + # daemonic and usually automatically joined when the process + # *exits*. + # + # In case that the receiver has ended abruptly while + # reading from the pipe, the join will hang forever. The usual + # solution for this in Python is calling `q.cancel_join_thread`, + # which prevents automatically joining it when finalizing + # (exiting). + # + # Nonetheless, `cancel_join_thread` must only be called when the + # queue is **not** going to be read from or write into by another + # process, because it may hold onto a lock or leave corrupted data + # in the queue, leading other readers/writers to hang. + # + # Hence, + # + For worker processes, we only do so (for their output + # queues, i.e., `worker_result_queue`) before exiting. + # + For `pin_memory_thread`, its output queue `data_queue` is a + # `queue.Queue` that does blocking `put` if the queue is full. + # So there is no above problem, but as a result, in + # `_pin_memory_loop`, we do need to wrap the `put` in a loop + # that breaks not only upon success, but also when the main + # process stops reading, i.e., is shutting down. + # + For loader process, we `cancel_join_thread()` for all + # `_index_queues` because the whole purpose of workers and + # `pin_memory_thread` is to serve the loader process. If + # loader process is already exiting, we don't really care if + # the queues are corrupted. 
+ # + # + # Now let's get back to 1: + # how we gracefully exit the workers when the last reference to the + # iterator is gone. + # + # To achieve this, we implement the following logic along with the design + # choices mentioned above: + # + # `workers_done_event`: + # A `multiprocessing.Event` shared among the main process and all worker + # processes. This is used to signal the workers that the iterator is + # shutting down. After it is set, they will not send processed data to + # queues anymore, and only wait for the final `None` before exiting. + # `done_event` isn't strictly needed. I.e., we can just check for `None` + # from the input queue, but it allows us to skip wasting resources + # processing data if we are already shutting down. + # + # `pin_memory_thread_done_event`: + # A `threading.Event` for a similar purpose to that of + # `workers_done_event`, but is for the `pin_memory_thread`. The reason + # that separate events are needed is that `pin_memory_thread` reads from + # the output queue of the workers. But the workers, upon seeing that + # `workers_done_event` is set, only wants to see the final `None`, and is + # not required to flush all data in the output queue (e.g., it may call + # `cancel_join_thread` on that queue if its `IterableDataset` iterator + # happens to exhaust coincidentally, which is out of the control of the + # main process). Thus, since we will exit `pin_memory_thread` before the + # workers (see below), two separete events are used. + # + # NOTE: In short, the protocol is that the main process will set these + # `done_event`s and then the corresponding processes/threads a `None`, + # and that they may exit at any time after receiving the `None`. + # + # NOTE: Using `None` as the final signal is valid, since normal data will + # always be a 2-tuple with the 1st element being the index of the data + # transferred (different from dataset index/key), and the 2nd being + # either the dataset key or the data sample (depending on which part + # of the data model the queue is at). + # + # [ worker processes ] + # While loader process is alive: + # Get from `index_queue`. + # If get anything else, + # Check `workers_done_event`. + # If set, continue to next iteration + # i.e., keep getting until see the `None`, then exit. + # Otherwise, process data: + # If is fetching from an `IterableDataset` and the iterator + # is exhausted, send an `_IterableDatasetStopIteration` + # object to signal iteration end. The main process, upon + # receiving such an object, will send `None` to this + # worker and not use the corresponding `index_queue` + # anymore. + # If timed out, + # No matter `workers_done_event` is set (still need to see `None`) + # or not, must continue to next iteration. + # (outside loop) + # If `workers_done_event` is set, (this can be False with `IterableDataset`) + # `data_queue.cancel_join_thread()`. (Everything is ending here: + # main process won't read from it; + # other workers will also call + # `cancel_join_thread`.) + # + # [ pin_memory_thread ] + # # No need to check main thread. If this thread is alive, the main loader + # # thread must be alive, because this thread is set as daemonic. + # While `pin_memory_thread_done_event` is not set: + # Get from `worker_result_queue`. + # If timed out, continue to get in the next iteration. + # Otherwise, process data. + # While `pin_memory_thread_done_event` is not set: + # Put processed data to `data_queue` (a `queue.Queue` with blocking put) + # If timed out, continue to put in the next iteration. 
+ # Otherwise, break, i.e., continuing to the out loop. + # + # NOTE: we don't check the status of the main thread because + # 1. if the process is killed by fatal signal, `pin_memory_thread` + # ends. + # 2. in other cases, either the cleaning-up in __del__ or the + # automatic exit of daemonic thread will take care of it. + # This won't busy-wait either because `.get(timeout)` does not + # busy-wait. + # + # [ main process ] + # In the DataLoader Iter's `__del__` + # b. Exit `pin_memory_thread` + # i. Set `pin_memory_thread_done_event`. + # ii Put `None` in `worker_result_queue`. + # iii. Join the `pin_memory_thread`. + # iv. `worker_result_queue.cancel_join_thread()`. + # + # c. Exit the workers. + # i. Set `workers_done_event`. + # ii. Put `None` in each worker's `index_queue`. + # iii. Join the workers. + # iv. Call `.cancel_join_thread()` on each worker's `index_queue`. + # + # NOTE: (c) is better placed after (b) because it may leave corrupted + # data in `worker_result_queue`, which `pin_memory_thread` + # reads from, in which case the `pin_memory_thread` can only + # happen at timing out, which is slow. Nonetheless, same thing + # happens if a worker is killed by signal at unfortunate times, + # but in other cases, we are better off having a non-corrupted + # `worker_result_queue` for `pin_memory_thread`. + # + # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b) + # can be omitted + # + # NB: `done_event`s isn't strictly needed. E.g., we can just check for + # `None` from `index_queue`, but it allows us to skip wasting resources + # processing indices already in `index_queue` if we are already shutting + # down. + + def __init__(self, loader): + super().__init__(loader) + + self._prefetch_factor = loader.prefetch_factor + + assert self._num_workers > 0 + assert self._prefetch_factor > 0 + + if loader.multiprocessing_context is None: + multiprocessing_context = multiprocessing + else: + multiprocessing_context = loader.multiprocessing_context + + self._worker_init_fn = loader.worker_init_fn + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # Additional worker init function will take care of sharding in MP and Distributed + if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): + self._worker_init_fn = functools.partial( + _sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank) + + # No certainty which module multiprocessing_context is + self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated] + self._worker_pids_set = False + self._shutdown = False + self._workers_done_event = multiprocessing_context.Event() + + self._index_queues = [] + self._workers = [] + for i in range(self._num_workers): + # No certainty which module multiprocessing_context is + index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated] + # Need to `cancel_join_thread` here! + # See sections (2) and (3b) above. + index_queue.cancel_join_thread() + w = multiprocessing_context.Process( + target=_utils.worker._worker_loop, + args=(self._dataset_kind, self._dataset, index_queue, + self._worker_result_queue, self._workers_done_event, + self._auto_collation, self._collate_fn, self._drop_last, + self._base_seed, self._worker_init_fn, i, self._num_workers, + self._persistent_workers, self._shared_seed)) + w.daemon = True + # NB: Process.start() actually take some time as it needs to + # start a process and pass the arguments over via a pipe. 
+ # Therefore, we only add a worker to self._workers list after + # it started, so that we do not call .join() if program dies + # before it starts, and __del__ tries to join but will get: + # AssertionError: can only join a started process. + w.start() + self._index_queues.append(index_queue) + self._workers.append(w) + + if self._pin_memory: + self._pin_memory_thread_done_event = threading.Event() + + # Queue is not type-annotated + self._data_queue = queue.Queue() # type: ignore[var-annotated] + if self._pin_memory_device == "xpu": + current_device = torch.xpu.current_device() # type: ignore[attr-defined] + elif self._pin_memory_device == torch._C._get_privateuse1_backend_name(): + custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name()) + current_device = custom_device_mod.current_device() + else: + current_device = torch.cuda.current_device() # choose cuda for default + pin_memory_thread = threading.Thread( + target=_utils.pin_memory._pin_memory_loop, + args=(self._worker_result_queue, self._data_queue, + current_device, + self._pin_memory_thread_done_event, self._pin_memory_device)) + pin_memory_thread.daemon = True + pin_memory_thread.start() + # Similar to workers (see comment above), we only register + # pin_memory_thread once it is started. + self._pin_memory_thread = pin_memory_thread + else: + self._data_queue = self._worker_result_queue # type: ignore[assignment] + + # In some rare cases, persistent workers (daemonic processes) + # would be terminated before `__del__` of iterator is invoked + # when main process exits + # It would cause failure when pin_memory_thread tries to read + # corrupted data from worker_result_queue + # atexit is used to shutdown thread and child processes in the + # right sequence before main process exits + if self._persistent_workers and self._pin_memory: + import atexit + for w in self._workers: + atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w) + + # .pid can be None only before process is spawned (not the case, so ignore) + _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc] + _utils.signal_handling._set_SIGCHLD_handler() + self._worker_pids_set = True + self._reset(loader, first_iter=True) + + def _reset(self, loader, first_iter=False): + super()._reset(loader, first_iter) + self._send_idx = 0 # idx of the next task to be sent to workers + self._rcvd_idx = 0 # idx of the next task to be returned in __next__ + # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx). + # map: task idx => - (worker_id,) if data isn't fetched (outstanding) + # \ (worker_id, data) if data is already fetched (out-of-order) + self._task_info = {} + self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1) + # A list of booleans representing whether each worker still has work to + # do, i.e., not having exhausted its iterable dataset object. It always + # contains all `True`s if not using an iterable-style dataset + # (i.e., if kind != Iterable). + # Not that this indicates that a worker still has work to do *for this epoch*. + # It does not mean that a worker is dead. In case of `_persistent_workers`, + # the worker will be reset to available in the next epoch. 
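# For orientation, the constructor arguments consumed above map directly to the
# user-facing `DataLoader` knobs. A minimal usage sketch (the dataset and the
# values are placeholders, not taken from this file):
#
#   from torch.utils.data import DataLoader
#
#   loader = DataLoader(
#       my_dataset,
#       batch_size=32,
#       num_workers=4,            # > 0 selects this multiprocessing iterator
#       pin_memory=True,          # starts the `pin_memory_thread` described above
#       prefetch_factor=2,        # primes 2 * num_workers tasks per epoch
#       persistent_workers=True,  # workers survive across epochs (see `_reset`)
#   )
#   for batch in loader:
#       ...                       # batches are yielded in submission order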
+ self._workers_status = [True for i in range(self._num_workers)] + # Reset the worker queue cycle so it resumes next epoch at worker 0 + self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers)) + # We resume the prefetching in case it was enabled + if not first_iter: + for idx in range(self._num_workers): + self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed)) + resume_iteration_cnt = self._num_workers + while resume_iteration_cnt > 0: + return_idx, return_data = self._get_data() + if isinstance(return_idx, _utils.worker._ResumeIteration): + assert return_data is None + resume_iteration_cnt -= 1 + # prime the prefetch loop + for _ in range(self._prefetch_factor * self._num_workers): + self._try_put_index() + + def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL): + # Tries to fetch data from `self._data_queue` once for a given timeout. + # This can also be used as inner loop of fetching without timeout, with + # the sender status as the loop condition. + # + # This raises a `RuntimeError` if any worker died expectedly. This error + # can come from either the SIGCHLD handler in `_utils/signal_handling.py` + # (only for non-Windows platforms), or the manual check below on errors + # and timeouts. + # + # Returns a 2-tuple: + # (bool: whether successfully get data, any: data if successful else None) + try: + data = self._data_queue.get(timeout=timeout) + return (True, data) + except Exception as e: + # At timeout and error, we manually check whether any worker has + # failed. Note that this is the only mechanism for Windows to detect + # worker failures. + failed_workers = [] + for worker_id, w in enumerate(self._workers): + if self._workers_status[worker_id] and not w.is_alive(): + failed_workers.append(w) + self._mark_worker_as_unavailable(worker_id) + if len(failed_workers) > 0: + pids_str = ', '.join(str(w.pid) for w in failed_workers) + raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e + if isinstance(e, queue.Empty): + return (False, None) + import tempfile + import errno + try: + # Raise an exception if we are this close to the FDs limit. + # Apparently, trying to open only one file is not a sufficient + # test. + # See NOTE [ DataLoader on Linux and open files limit ] + fds_limit_margin = 10 + fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)] + except OSError as e: + if e.errno == errno.EMFILE: + raise RuntimeError( + "Too many open files. Communication with the" + " workers is no longer possible. Please increase the" + " limit using `ulimit -n` in the shell or change the" + " sharing strategy by calling" + " `torch.multiprocessing.set_sharing_strategy('file_system')`" + " at the beginning of your code") from None + raise + +# NOTE [ DataLoader on Linux and open files limit ] +# +# On Linux when DataLoader is used with multiprocessing we pass the data between +# the root process and the workers through SHM files. We remove those files from +# the filesystem as soon as they are created and keep them alive by +# passing around their file descriptors through AF_UNIX sockets. (See +# docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in +# the wiki (https://github.com/pytorch/pytorch/wiki).) +# +# This sometimes leads us to exceeding the open files limit. 
When that happens, +# and the offending file descriptor is coming over a socket, the `socket` Python +# package silently strips the file descriptor from the message, setting only the +# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that +# it _indicates that some control data were discarded due to lack of space in +# the buffer for ancillary data_). This might reflect the C implementation of +# AF_UNIX sockets. +# +# This behaviour can be reproduced with the script and instructions at the +# bottom of this note. +# +# When that happens, the standard Python `multiprocessing` (and not +# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata` +# +# Sometimes, instead of the FD being stripped, you may get an `OSError: +# Too many open files`, both in the script below and in DataLoader. However, +# this is rare and seems to be nondeterministic. +# +# +# #!/usr/bin/env python3 +# import sys +# import socket +# import os +# import array +# import shutil +# import socket +# +# +# if len(sys.argv) != 4: +# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)") +# sys.exit(1) +# +# if __name__ == '__main__': +# dirname = sys.argv[1] +# sock_path = dirname + "/sock" +# iterations = int(sys.argv[2]) +# def dummy_path(i): +# return dirname + "/" + str(i) + ".dummy" +# +# +# if sys.argv[3] == 'send': +# while not os.path.exists(sock_path): +# pass +# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# client.connect(sock_path) +# for i in range(iterations): +# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT) +# ancdata = array.array('i', [fd]) +# msg = bytes([i % 256]) +# print("Sending fd ", fd, " (iteration #", i, ")") +# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)]) +# +# +# else: +# assert sys.argv[3] == 'recv' +# +# if os.path.exists(dirname): +# raise Exception("Directory exists") +# +# os.mkdir(dirname) +# +# print("Opening socket...") +# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# server.bind(sock_path) +# +# print("Listening...") +# for i in range(iterations): +# a = array.array('i') +# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize)) +# assert(len(ancdata) == 1) +# cmsg_level, cmsg_type, cmsg_data = ancdata[0] +# a.frombytes(cmsg_data) +# print("Received fd ", a[0], " (iteration #", i, ")") +# +# shutil.rmtree(dirname) +# +# Steps to reproduce: +# +# 1. Run two shells and set lower file descriptor limit in the receiving one: +# (shell1) ulimit -n 1020 +# (shell2) ulimit -n 1022 +# +# 2. Run the script above with the `recv` option in the first shell +# (shell1) ./test_socket.py sock_tmp 1017 recv +# +# 3. Run the script with the `send` option in the second shell: +# (shell2) ./test_socket.py sock_tmp 1017 send + + def _get_data(self): + # Fetches data from `self._data_queue`. + # + # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds, + # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)` + # in a loop. This is the only mechanism to detect worker failures for + # Windows. For other platforms, a SIGCHLD handler is also used for + # worker failure detection. + # + # If `pin_memory=True`, we also need check if `pin_memory_thread` had + # died at timeouts. 
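# The polling pattern described in the comment above, reduced to a sketch
# (hypothetical helper, not the implementation in this file): a short, timed
# `get` is retried in a loop so that the producer's liveness can be re-checked
# between attempts instead of blocking forever.
#
#   import queue
#
#   POLL_INTERVAL = 5.0  # stands in for _utils.MP_STATUS_CHECK_INTERVAL
#
#   def get_with_liveness_check(result_queue, producer_thread):
#       while producer_thread.is_alive():
#           try:
#               return result_queue.get(timeout=POLL_INTERVAL)
#           except queue.Empty:
#               continue          # nothing yet; re-check liveness and retry
#       raise RuntimeError('producer exited unexpectedly')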
+ if self._timeout > 0: + success, data = self._try_get_data(self._timeout) + if success: + return data + else: + raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds') + elif self._pin_memory: + while self._pin_memory_thread.is_alive(): + success, data = self._try_get_data() + if success: + return data + else: + # while condition is false, i.e., pin_memory_thread died. + raise RuntimeError('Pin memory thread exited unexpectedly') + # In this case, `self._data_queue` is a `queue.Queue`,. But we don't + # need to call `.task_done()` because we don't use `.join()`. + else: + while True: + success, data = self._try_get_data() + if success: + return data + + def _next_data(self): + while True: + # If the worker responsible for `self._rcvd_idx` has already ended + # and was unable to fulfill this task (due to exhausting an `IterableDataset`), + # we try to advance `self._rcvd_idx` to find the next valid index. + # + # This part needs to run in the loop because both the `self._get_data()` + # call and `_IterableDatasetStopIteration` check below can mark + # extra worker(s) as dead. + while self._rcvd_idx < self._send_idx: + info = self._task_info[self._rcvd_idx] + worker_id = info[0] + if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active + break + del self._task_info[self._rcvd_idx] + self._rcvd_idx += 1 + else: + # no valid `self._rcvd_idx` is found (i.e., didn't break) + if not self._persistent_workers: + self._shutdown_workers() + raise StopIteration + + # Now `self._rcvd_idx` is the batch index we want to fetch + + # Check if the next sample has already been generated + if len(self._task_info[self._rcvd_idx]) == 2: + data = self._task_info.pop(self._rcvd_idx)[1] + return self._process_data(data) + + assert not self._shutdown and self._tasks_outstanding > 0 + idx, data = self._get_data() + self._tasks_outstanding -= 1 + if self._dataset_kind == _DatasetKind.Iterable: + # Check for _IterableDatasetStopIteration + if isinstance(data, _utils.worker._IterableDatasetStopIteration): + if self._persistent_workers: + self._workers_status[data.worker_id] = False + else: + self._mark_worker_as_unavailable(data.worker_id) + self._try_put_index() + continue + + if idx != self._rcvd_idx: + # store out-of-order samples + self._task_info[idx] += (data,) + else: + del self._task_info[idx] + return self._process_data(data) + + def _try_put_index(self): + assert self._tasks_outstanding < self._prefetch_factor * self._num_workers + + try: + index = self._next_index() + except StopIteration: + return + for _ in range(self._num_workers): # find the next active worker, if any + worker_queue_idx = next(self._worker_queue_idx_cycle) + if self._workers_status[worker_queue_idx]: + break + else: + # not found (i.e., didn't break) + return + + self._index_queues[worker_queue_idx].put((self._send_idx, index)) # type: ignore[possibly-undefined] + self._task_info[self._send_idx] = (worker_queue_idx,) + self._tasks_outstanding += 1 + self._send_idx += 1 + + def _process_data(self, data): + self._rcvd_idx += 1 + self._try_put_index() + if isinstance(data, ExceptionWrapper): + data.reraise() + return data + + def _mark_worker_as_unavailable(self, worker_id, shutdown=False): + # Mark a worker as having finished its work e.g., due to + # exhausting an `IterableDataset`. This should be used only when this + # `_MultiProcessingDataLoaderIter` is going to continue running. 
+ + assert self._workers_status[worker_id] or (self._persistent_workers and shutdown) + + # Signal termination to that specific worker. + q = self._index_queues[worker_id] + # Indicate that no more data will be put on this queue by the current + # process. + q.put(None) + + # Note that we don't actually join the worker here, nor do we remove the + # worker's pid from C side struct because (1) joining may be slow, and + # (2) since we don't join, the worker may still raise error, and we + # prefer capturing those, rather than ignoring them, even though they + # are raised after the worker has finished its job. + # Joinning is deferred to `_shutdown_workers`, which it is called when + # all workers finish their jobs (e.g., `IterableDataset` replicas) or + # when this iterator is garbage collected. + + self._workers_status[worker_id] = False + + assert self._workers_done_event.is_set() == shutdown + + def _shutdown_workers(self): + # Called when shutting down this `_MultiProcessingDataLoaderIter`. + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on + # the logic of this function. + if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None: + # See (2) of the note. If Python is shutting down, do no-op. + return + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + if not self._shutdown: + self._shutdown = True + try: + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + + # Exit `pin_memory_thread` first because exiting workers may leave + # corrupted data in `worker_result_queue` which `pin_memory_thread` + # reads from. + if hasattr(self, '_pin_memory_thread'): + # Use hasattr in case error happens before we set the attribute. + self._pin_memory_thread_done_event.set() + # Send something to pin_memory_thread in case it is waiting + # so that it can wake up and check `pin_memory_thread_done_event` + self._worker_result_queue.put((None, None)) + self._pin_memory_thread.join() + self._worker_result_queue.cancel_join_thread() + self._worker_result_queue.close() + + # Exit workers now. + self._workers_done_event.set() + for worker_id in range(len(self._workers)): + # Get number of workers from `len(self._workers)` instead of + # `self._num_workers` in case we error before starting all + # workers. + # If we are using workers_status with persistent_workers + # we have to shut it down because the worker is paused + if self._persistent_workers or self._workers_status[worker_id]: + self._mark_worker_as_unavailable(worker_id, shutdown=True) + for w in self._workers: + # We should be able to join here, but in case anything went + # wrong, we set a timeout and if the workers fail to join, + # they are killed in the `finally` block. + w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + for q in self._index_queues: + q.cancel_join_thread() + q.close() + finally: + # Even though all this function does is putting into queues that + # we have called `cancel_join_thread` on, weird things can + # happen when a worker is killed by a signal, e.g., hanging in + # `Event.set()`. So we need to guard this with SIGCHLD handler, + # and remove pids from the C side data structure only at the + # end. + # + # FIXME: Unfortunately, for Windows, we are missing a worker + # error detection mechanism here in this function, as it + # doesn't provide a SIGCHLD handler. 
+ if self._worker_pids_set: + _utils.signal_handling._remove_worker_pids(id(self)) + self._worker_pids_set = False + for w in self._workers: + if w.is_alive(): + # Existing mechanisms try to make the workers exit + # peacefully, but in case that we unfortunately reach + # here, which we shouldn't, (e.g., pytorch/pytorch#39570), + # we kill the worker. + w.terminate() + + # staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter` + @staticmethod + def _clean_up_worker(w): + try: + w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + finally: + if w.is_alive(): + w.terminate() + + def __del__(self): + self._shutdown_workers() diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f19389e21bfefff0ea2705680d0c133730cfa228 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py @@ -0,0 +1,3 @@ +from . import iter +from . import map +from . import dataframe diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..b67b73158575eeebaf75ccd7d8f4bb571024fc29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py @@ -0,0 +1,184 @@ +import inspect +from functools import wraps +from typing import Any, Callable, Optional, Type, Union, get_type_hints +from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe +from torch.utils.data.datapipes._typing import _DataPipeMeta + + +###################################################### +# Functional API +###################################################### +class functional_datapipe: + name: str + + def __init__(self, name: str, enable_df_api_tracing=False) -> None: + """ + Define a functional datapipe. + + Args: + enable_df_api_tracing - if set, any returned DataPipe would accept + DataFrames API in tracing mode. 
+ """ + self.name = name + self.enable_df_api_tracing = enable_df_api_tracing + + def __call__(self, cls): + if issubclass(cls, IterDataPipe): + if isinstance(cls, Type): # type: ignore[arg-type] + if not isinstance(cls, _DataPipeMeta): + raise TypeError('`functional_datapipe` can only decorate IterDataPipe') + # with non_deterministic decorator + else: + if not isinstance(cls, non_deterministic) and \ + not (hasattr(cls, '__self__') and + isinstance(cls.__self__, non_deterministic)): + raise TypeError('`functional_datapipe` can only decorate IterDataPipe') + IterDataPipe.register_datapipe_as_function(self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing) + elif issubclass(cls, MapDataPipe): + MapDataPipe.register_datapipe_as_function(self.name, cls) + + return cls + + +###################################################### +# Determinism +###################################################### +_determinism: bool = False + + +class guaranteed_datapipes_determinism: + prev: bool + + def __init__(self) -> None: + global _determinism + self.prev = _determinism + _determinism = True + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + global _determinism + _determinism = self.prev + + +class non_deterministic: + cls: Optional[Type[IterDataPipe]] = None + # TODO: Lambda for picking + deterministic_fn: Callable[[], bool] + + def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None: + # 1. Decorator doesn't have any argument + if isinstance(arg, Type): # type: ignore[arg-type] + if not issubclass(arg, IterDataPipe): # type: ignore[arg-type] + raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`" + f", but {arg.__name__} is found") + self.cls = arg # type: ignore[assignment] + # 2. Decorator has an argument of a function + # This class should behave differently given different inputs. Use this + # function to verify the determinism for each instance. + # When the function returns True, the instance is non-deterministic. Otherwise, + # the instance is a deterministic DataPipe. + elif isinstance(arg, Callable): # type:ignore[arg-type] + self.deterministic_fn = arg # type: ignore[assignment, misc] + else: + raise TypeError(f"{arg} can not be decorated by non_deterministic") + + def __call__(self, *args, **kwargs): + global _determinism + # Decorate IterDataPipe + if self.cls is not None: + if _determinism: + raise TypeError("{} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. 
" + "You can turn off determinism for this DataPipe if that is acceptable " + "for your application".format(self.cls.__name__)) + return self.cls(*args, **kwargs) # type: ignore[call-arg] + + # Decorate with a functional argument + if not (isinstance(args[0], Type) and # type: ignore[arg-type] + issubclass(args[0], IterDataPipe)): + raise TypeError(f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found") + self.cls = args[0] + return self.deterministic_wrapper_fn + + def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe: + res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc] + if not isinstance(res, bool): + raise TypeError("deterministic_fn of `non_deterministic` decorator is required " + f"to return a boolean value, but {type(res)} is found") + global _determinism + if _determinism and res: + raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr] + "'guaranteed_datapipes_determinism'. You can turn off determinism " + "for this DataPipe if that is acceptable for your application" + ) + return self.cls(*args, **kwargs) # type: ignore[call-arg, misc] + + +###################################################### +# Type validation +###################################################### +# Validate each argument of DataPipe with hint as a subtype of the hint. +def argument_validation(f): + signature = inspect.signature(f) + hints = get_type_hints(f) + + @wraps(f) + def wrapper(*args, **kwargs): + bound = signature.bind(*args, **kwargs) + for argument_name, value in bound.arguments.items(): + if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta): + hint = hints[argument_name] + if not isinstance(value, IterDataPipe): + raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}") + if not value.type.issubtype(hint.type): + raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of " + f"hint {hint.type}, but found {value.type}" + ) + + return f(*args, **kwargs) + + return wrapper + + +# Default value is True +_runtime_validation_enabled: bool = True + + +class runtime_validation_disabled: + prev: bool + + def __init__(self) -> None: + global _runtime_validation_enabled + self.prev = _runtime_validation_enabled + _runtime_validation_enabled = False + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + global _runtime_validation_enabled + _runtime_validation_enabled = self.prev + + +# Runtime checking +# Validate output data is subtype of return hint +def runtime_validation(f): + # TODO: + # Can be extended to validate '__getitem__' and nonblocking + if f.__name__ != '__iter__': + raise TypeError(f"Can not decorate function {f.__name__} with 'runtime_validation'") + + @wraps(f) + def wrapper(self): + global _runtime_validation_enabled + if not _runtime_validation_enabled: + yield from f(self) + else: + it = f(self) + for d in it: + if not self.type.issubtype_of_instance(d): + raise RuntimeError(f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})") + yield d + + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..7463cc55d27c97aeb0af44433451e581b811b127 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py @@ -0,0 +1,248 @@ +import inspect +import functools +from enum import Enum + +import torch.autograd + + +class _SnapshotState(Enum): + r""" + These are the snapshotting-related states that IterDataPipes can be in. + + `NotStarted` - allows you to restore a snapshot and create an iterator with reset + `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe + `Iterating` - can restore, will reset if you create a new iterator + """ + + NotStarted = 0 + Restored = 1 + Iterating = 2 + + +def _simplify_obj_name(obj) -> str: + """Simplify the display strings of objects for the purpose of rendering within DataPipe error messages.""" + if inspect.isfunction(obj): + return obj.__name__ + else: + return repr(obj) + + +def _strip_datapipe_from_name(name: str) -> str: + return name.replace("IterDataPipe", "").replace("MapDataPipe", "") + + +def _generate_input_args_string(obj): + """Generate a string for the input arguments of an object.""" + signature = inspect.signature(obj.__class__) + input_param_names = set() + for param_name in signature.parameters.keys(): + input_param_names.add(param_name) + result = [] + for name, value in inspect.getmembers(obj): + if name in input_param_names: + result.append((name, _simplify_obj_name(value))) + return ', '.join([f'{name}={value}' for name, value in result]) + + +def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False): + output_string = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})" + if simplify_dp_name: + output_string = _strip_datapipe_from_name(output_string) + return output_string + + +def _gen_invalid_iterdatapipe_msg(datapipe): + return ("This iterator has been invalidated because another iterator has been created " + f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n" + "This may be caused multiple references to the same IterDataPipe. We recommend " + "using `.fork()` if that is necessary.") + + +_feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free " + "to comment on this issue: https://github.com/pytorch/data/issues/45.") + + +def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None: + r""" + Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception. + + In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well. + """ + if next_method_exists: + # This is the case where `IterDataPipe` has both `__iter__` and `__next__`. + # The `_valid_iterator_id` should either be never set (`None`), or set by at most one + # iterator (`0`). Otherwise, it means there are multiple iterators. + if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0: + extra_msg = "\nNote that this exception is raised inside your IterDataPipe's a `__next__` method" + raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg) + elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True: + if hasattr(datapipe, "_check_valid_iterator_id"): + if not datapipe._check_valid_iterator_id(iterator_id): + raise RuntimeError("This iterator has been invalidated, because a new iterator has been created " + f"from one of the ChildDataPipes of " + f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." 
+ _feedback_msg) + else: + raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.") + elif datapipe._valid_iterator_id != iterator_id: + raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg) + + +def _set_datapipe_valid_iterator_id(datapipe): + """Given a DataPipe, updates its valid iterator ID and reset the DataPipe.""" + if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True: + if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"): + datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate + else: + raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.") + else: + if datapipe._valid_iterator_id is None: + datapipe._valid_iterator_id = 0 + else: + datapipe._valid_iterator_id += 1 + datapipe.reset() + return datapipe._valid_iterator_id + + +def hook_iterator(namespace): + r""" + Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`. + + This is done for the purpose of profiling and checking if an iterator is still valid. + """ + + def profiler_record_fn_context(datapipe): + if not hasattr(datapipe, "_profile_name"): + datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True) + return torch.autograd.profiler.record_function(datapipe._profile_name) + + class IteratorDecorator: + r""" + Wrap the iterator and modifying its `__next__` method. + + This decorator is applied to DataPipes of which `__iter__` method is NOT a generator function. + Those `__iter__` method commonly returns `self` but not necessarily. + """ + + def __init__(self, iterator, datapipe, iterator_id, has_next_method): + self.iterator = iterator + self.datapipe = datapipe + self.iterator_id = iterator_id + self._profiler_enabled = torch.autograd._profiler_enabled() + # Check if `__iter__` returns `self` and `DataPipe` has `__next__` + self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method + + def __iter__(self): + return self + + def _get_next(self): + """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded.""" + _check_iterator_valid(self.datapipe, self.iterator_id) + result = next(self.iterator) + if not self.self_and_has_next_method: + self.datapipe._number_of_samples_yielded += 1 + return result + + def __next__(self): + # TODO: Add try-except to in-place reduce traceback from the Exception + # See: https://github.com/pytorch/data/issues/284 + if self._profiler_enabled: + with profiler_record_fn_context(self.datapipe): + return self._get_next() + else: # Decided against using `contextlib.nullcontext` for performance reasons + return self._get_next() + + def __getattr__(self, name): + return getattr(self.iterator, name) + + func = namespace['__iter__'] + + # ``__iter__`` of IterDataPipe is a generator function + if inspect.isgeneratorfunction(func): + @functools.wraps(func) + def wrap_generator(*args, **kwargs): + gen = func(*args, **kwargs) + datapipe = args[0] + if datapipe._fast_forward_iterator: + it = datapipe._fast_forward_iterator + datapipe._fast_forward_iterator = None + datapipe._snapshot_state = _SnapshotState.Iterating + while True: + try: + yield next(it) + except StopIteration: + return + iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator + _profiler_enabled = torch.autograd._profiler_enabled() + try: + if _profiler_enabled: + with profiler_record_fn_context(datapipe): + 
response = gen.send(None) + else: + response = gen.send(None) + + while True: + datapipe._number_of_samples_yielded += 1 + request = yield response + # Pass through here every time `__next__` is called + if _profiler_enabled: + with profiler_record_fn_context(datapipe): + _check_iterator_valid(datapipe, iterator_id) + response = gen.send(request) + else: # Decided against using `contextlib.nullcontext` for performance reasons + _check_iterator_valid(datapipe, iterator_id) + response = gen.send(request) + except StopIteration as e: + return + except Exception as e: + # TODO: Simplify the traceback message to skip over `response = gen.send(None)` + # Part of https://github.com/pytorch/data/issues/284 + datapipe = args[0] + msg = "thrown by __iter__ of" + single_iterator_msg = "single iterator per IterDataPipe constraint" + if hasattr(e.args, '__len__'): + full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})" + if len(e.args) == 0 or not isinstance(e.args[0], str): # If an exception message doesn't exist + e.args = (f'\nThis exception is {full_msg}',) + elif msg not in e.args[0] and single_iterator_msg not in e.args[0]: + e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:] + raise + + namespace['__iter__'] = wrap_generator + else: # ``__iter__`` of IterDataPipe is NOT a generator function + # IterDataPipe is an iterator with both ``__iter__`` and ``__next__`` + # And ``__iter__`` may or may not return `self` + if '__next__' in namespace: # If `__next__` exists, put a wrapper around it + next_func = namespace['__next__'] + + @functools.wraps(next_func) + def wrap_next(*args, **kwargs): + datapipe = args[0] + if torch.autograd._profiler_enabled(): + with profiler_record_fn_context(datapipe): + result = next_func(*args, **kwargs) + else: + result = next_func(*args, **kwargs) + datapipe._number_of_samples_yielded += 1 + return result + + namespace['__next__'] = wrap_next + + # Note that if the `__next__` and `__iter__` do something completely unrelated. It may cause issue but + # the user will be violating the iterator protocol. Potential issue: + # 1. Valid iterator ID may not update or checked properly + # 2. 
The number of samples yielded will be miscounted + + # Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators + @functools.wraps(func) + def wrap_iter(*args, **kwargs): + iter_ret = func(*args, **kwargs) + datapipe = args[0] + datapipe._snapshot_state = _SnapshotState.Iterating + if datapipe._fast_forward_iterator: + iter_ret = datapipe._fast_forward_iterator + datapipe._fast_forward_iterator = None + return iter_ret + iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator + return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace) + + namespace['__iter__'] = wrap_iter diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf2907abf1051d8eaf96eae89c679ebe9c57fb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py @@ -0,0 +1,430 @@ +# Taking reference from official Python typing +# https://github.com/python/cpython/blob/master/Lib/typing.py + +import collections +import functools +import numbers +import sys + +from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState +from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union, + get_type_hints) +from typing import _eval_type, _tp_cache, _type_check, _type_repr # type: ignore[attr-defined] +from typing import ForwardRef + +# TODO: Use TypeAlias when Python 3.6 is deprecated +# Please check [Note: TypeMeta and TypeAlias] +# In case of metaclass conflict due to ABCMeta or _ProtocolMeta +# For Python 3.9, only Protocol in typing uses metaclass +from abc import ABCMeta +from typing import _GenericAlias # type: ignore[attr-defined, no-redef] + +class GenericMeta(ABCMeta): # type: ignore[no-redef] + pass + + +class Integer(numbers.Integral): + pass + + +class Boolean(numbers.Integral): + pass + + +# Python 'type' object is not subscriptable +# Tuple[int, List, dict] -> valid +# tuple[int, list, dict] -> invalid +# Map Python 'type' to abstract base class +TYPE2ABC = { + bool: Boolean, + int: Integer, + float: numbers.Real, + complex: numbers.Complex, + dict: Dict, + list: List, + set: Set, + tuple: Tuple, + None: type(None), +} + + +def issubtype(left, right, recursive=True): + r""" + Check if the left-side type is a subtype of the right-side type. + + If any of type is a composite type like `Union` and `TypeVar` with + bounds, it would be expanded into a list of types and check all + of left-side types are subtypes of either one from right-side types. 
+ """ + left = TYPE2ABC.get(left, left) + right = TYPE2ABC.get(right, right) + + if right is Any or left == right: + return True + + if isinstance(right, _GenericAlias): + if getattr(right, '__origin__', None) is Generic: + return True + + if right == type(None): + return False + + # Right-side type + constraints = _decompose_type(right) + + if len(constraints) == 0 or Any in constraints: + return True + + if left is Any: + return False + + # Left-side type + variants = _decompose_type(left) + + # all() will return True for empty variants + if len(variants) == 0: + return False + + return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants) + + +def _decompose_type(t, to_list=True): + if isinstance(t, TypeVar): + if t.__bound__ is not None: + ts = [t.__bound__] + else: + # For T_co, __constraints__ is () + ts = list(t.__constraints__) + elif hasattr(t, '__origin__') and t.__origin__ == Union: + ts = t.__args__ + else: + if not to_list: + return None + ts = [t] + # Ignored: Generator has incompatible item type "object"; expected "Type[Any]" + ts = [TYPE2ABC.get(_t, _t) for _t in ts] # type: ignore[misc] + return ts + + +def _issubtype_with_constraints(variant, constraints, recursive=True): + r""" + Check if the variant is a subtype of either one from constraints. + + For composite types like `Union` and `TypeVar` with bounds, they + would be expanded for testing. + """ + if variant in constraints: + return True + + # [Note: Subtype for Union and TypeVar] + # Python typing is able to flatten Union[Union[...]] or Union[TypeVar]. + # But it couldn't flatten the following scenarios: + # - Union[int, TypeVar[Union[...]]] + # - TypeVar[TypeVar[...]] + # So, variant and each constraint may be a TypeVar or a Union. + # In these cases, all of inner types from the variant are required to be + # extraced and verified as a subtype of any constraint. And, all of + # inner types from any constraint being a TypeVar or a Union are + # also required to be extracted and verified if the variant belongs to + # any of them. + + # Variant + vs = _decompose_type(variant, to_list=False) + + # Variant is TypeVar or Union + if vs is not None: + return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs) + + # Variant is not TypeVar or Union + if hasattr(variant, '__origin__') and variant.__origin__ is not None: + v_origin = variant.__origin__ + # In Python-3.9 typing library untyped generics do not have args + v_args = getattr(variant, "__args__", None) + else: + v_origin = variant + v_args = None + + # Constraints + for constraint in constraints: + cs = _decompose_type(constraint, to_list=False) + + # Constraint is TypeVar or Union + if cs is not None: + if _issubtype_with_constraints(variant, cs, recursive): + return True + # Constraint is not TypeVar or Union + else: + # __origin__ can be None for plain list, tuple, ... 
in Python 3.6 + if hasattr(constraint, '__origin__') and constraint.__origin__ is not None: + c_origin = constraint.__origin__ + if v_origin == c_origin: + if not recursive: + return True + # In Python-3.9 typing library untyped generics do not have args + c_args = getattr(constraint, "__args__", None) + if c_args is None or len(c_args) == 0: + return True + if v_args is not None and len(v_args) == len(c_args) and \ + all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)): + return True + # Tuple[int] -> Tuple + else: + if v_origin == constraint: + return True + + return False + + +def issubinstance(data, data_type): + if not issubtype(type(data), data_type, recursive=False): + return False + + # In Python-3.9 typing library __args__ attribute is not defined for untyped generics + dt_args = getattr(data_type, "__args__", None) + if isinstance(data, tuple): + if dt_args is None or len(dt_args) == 0: + return True + if len(dt_args) != len(data): + return False + return all(issubinstance(d, t) for d, t in zip(data, dt_args)) + elif isinstance(data, (list, set)): + if dt_args is None or len(dt_args) == 0: + return True + t = dt_args[0] + return all(issubinstance(d, t) for d in data) + elif isinstance(data, dict): + if dt_args is None or len(dt_args) == 0: + return True + kt, vt = dt_args + return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items()) + + return True + + +# [Note: TypeMeta and TypeAlias] +# In order to keep compatibility for Python 3.6, use Meta for the typing. +# TODO: When PyTorch drops the support for Python 3.6, it can be converted +# into the Alias system and using `__class_getitem__` for DataPipe. The +# typing system will gain benefit of performance and resolving metaclass +# conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/ + + +class _DataPipeType: + r"""Save type annotation in `param`.""" + + def __init__(self, param): + self.param = param + + def __repr__(self): + return _type_repr(self.param) + + def __eq__(self, other): + if isinstance(other, _DataPipeType): + return self.param == other.param + return NotImplemented + + def __hash__(self): + return hash(self.param) + + def issubtype(self, other): + if isinstance(other.param, _GenericAlias): + if getattr(other.param, '__origin__', None) is Generic: + return True + if isinstance(other, _DataPipeType): + return issubtype(self.param, other.param) + if isinstance(other, type): + return issubtype(self.param, other) + raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}") + + def issubtype_of_instance(self, other): + return issubinstance(other, self.param) + + +# Default type for DataPipe without annotation +T_co = TypeVar('T_co', covariant=True) +_DEFAULT_TYPE = _DataPipeType(Generic[T_co]) + + +class _DataPipeMeta(GenericMeta): + r""" + Metaclass for `DataPipe`. + + Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`. + + Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`. + """ + + type: _DataPipeType + + def __new__(cls, name, bases, namespace, **kwargs): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now. 
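# For intuition, the `issubtype` / `issubinstance` helpers defined earlier in
# this module behave roughly like the following REPL-style calls (illustrative
# only; results follow the equality-based checks above, not full numeric
# subtyping):
#
#   from typing import Any, List, Tuple, Union
#
#   issubtype(List[int], List)                 # True: untyped generic accepts any args
#   issubtype(List[int], List[str])            # False: element hints differ
#   issubtype(int, Union[int, str])            # True: matches one constraint
#   issubtype(Any, int)                        # False: Any on the left never narrows
#   issubinstance([1, 2, 3], List[int])        # True: every element checks out
#   issubinstance((1, 'a'), Tuple[int, int])   # False: second element is a str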
+ cls.__origin__ = None + if 'type' in namespace: + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace['__type_class__'] = False + # For plain derived class without annotation + for base in bases: + if isinstance(base, _DataPipeMeta): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace.update({'type': _DEFAULT_TYPE, + '__init_subclass__': _dp_init_subclass}) + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + def __init__(self, name, bases, namespace, **kwargs): + super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: Fix isinstance bug + @_tp_cache + def _getitem_(self, params): + if params is None: + raise TypeError(f'{self.__name__}[t]: t can not be None') + if isinstance(params, str): + params = ForwardRef(params) + if not isinstance(params, tuple): + params = (params, ) + + msg = f"{self.__name__}[t]: t must be a type" + params = tuple(_type_check(p, msg) for p in params) + + if isinstance(self.type.param, _GenericAlias): + orig = getattr(self.type.param, '__origin__', None) + if isinstance(orig, type) and orig is not Generic: + p = self.type.param[params] # type: ignore[index] + t = _DataPipeType(p) + l = len(str(self.type)) + 2 + name = self.__name__[:-l] + name = name + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + 'type': t, + '__type_class__': True}) + + if len(params) > 1: + raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1') + + t = _DataPipeType(params[0]) + + if not t.issubtype(self.type): + raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]') + + # Types are equal, fast path for inheritance + if self.type == t: + return self + + name = self.__name__ + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + + return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + '__type_class__': True, + 'type': t}) + + # TODO: Fix isinstance bug + def _eq_(self, other): + if not isinstance(other, _DataPipeMeta): + return NotImplemented + if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type] + return self is other + return (self.__origin__ == other.__origin__ # type: ignore[has-type] + and self.type == other.type) + + # TODO: Fix isinstance bug + def _hash_(self): + return hash((self.__name__, self.type)) + + +class _IterDataPipeMeta(_DataPipeMeta): + r""" + Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`. + + Add various functions for behaviors specific to `IterDataPipe`. + """ + + def __new__(cls, name, bases, namespace, **kwargs): + + if 'reset' in namespace: + reset_func = namespace['reset'] + + @functools.wraps(reset_func) + def conditional_reset(*args, **kwargs): + r""" + Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`. + + This allows recently restored DataPipe to preserve its restored state during the initial `__iter__` call. + """ + datapipe = args[0] + if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted): + # Reset `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have + # already begun iterating. 
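# What the `hook_iterator` machinery (see _hook_iterator.py above) means in
# practice: only one live iterator per IterDataPipe. An illustrative usage with
# `IterableWrapper`, one concrete IterDataPipe:
#
#   from torch.utils.data.datapipes.iter import IterableWrapper
#
#   dp = IterableWrapper(range(5))
#   it1 = iter(dp)
#   next(it1)              # 0
#   it2 = iter(dp)         # creating a second iterator invalidates it1
#   next(it2)              # 0 (the DataPipe was reset)
#   next(it1)              # RuntimeError: this iterator has been invalidated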
+ datapipe._number_of_samples_yielded = 0 + datapipe._fast_forward_iterator = None + reset_func(*args, **kwargs) + datapipe._snapshot_state = _SnapshotState.Iterating + + namespace['reset'] = conditional_reset + + if '__iter__' in namespace: + hook_iterator(namespace) + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + +def _dp_init_subclass(sub_cls, *args, **kwargs): + # Add function for datapipe instance to reinforce the type + sub_cls.reinforce_type = reinforce_type + + # TODO: + # - add global switch for type checking at compile-time + + # Ignore internal type class + if getattr(sub_cls, '__type_class__', False): + return + + # Check if the string type is valid + if isinstance(sub_cls.type.param, ForwardRef): + base_globals = sys.modules[sub_cls.__module__].__dict__ + try: + param = _eval_type(sub_cls.type.param, base_globals, locals()) + sub_cls.type.param = param + except TypeError as e: + raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e + + if '__iter__' in sub_cls.__dict__: + iter_fn = sub_cls.__dict__['__iter__'] + hints = get_type_hints(iter_fn) + if 'return' in hints: + return_hint = hints['return'] + # Plain Return Hint for Python 3.6 + if return_hint == Iterator: + return + if not (hasattr(return_hint, '__origin__') and + (return_hint.__origin__ == Iterator or + return_hint.__origin__ == collections.abc.Iterator)): + raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}" + ", but found {}".format(sub_cls.__name__, _type_repr(hints['return']))) + data_type = return_hint.__args__[0] + if not issubtype(data_type, sub_cls.type.param): + raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}" + " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__)) + + +def reinforce_type(self, expected_type): + r""" + Reinforce the type for DataPipe instance. + + And the 'expected_type' is required to be a subtype of the original type + hint to restrict the type requirement of DataPipe instance. 
+ """ + if isinstance(expected_type, tuple): + expected_type = Tuple[expected_type] + _type_check(expected_type, msg="'expected_type' must be a type") + + if not issubtype(expected_type, self.type.param): + raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}") + + self.type = _DataPipeType(expected_type) + return self diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a4892032ea570532d6a262d70501e8827128d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py @@ -0,0 +1,11 @@ +from torch.utils.data.datapipes.dataframe.dataframes import ( + CaptureDataFrame, DFIterDataPipe, +) +from torch.utils.data.datapipes.dataframe.datapipes import ( + DataFramesAsTuplesPipe, +) + +__all__ = ['CaptureDataFrame', 'DFIterDataPipe', 'DataFramesAsTuplesPipe'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..293c72eabf337341cd29f042918ef45f70a95db2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9b380cca1b6136a3b38af9cf8f846e177973e51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78aade880f6dbbb1f1f145a3f82b9cc7914ce1e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff7b1ea85582257352ff608722588b02e89fe64f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..478dbac6cb9578d26c8b1b52800a7148b378e05b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..3596cc171e5da567417535cedc4a174cd417cae1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py @@ -0,0 +1,125 @@ +from typing import Any, Optional + +_pandas: Any = None +_WITH_PANDAS: Optional[bool] = None + + +def _try_import_pandas() -> bool: + try: + import pandas # type: ignore[import] + global _pandas + _pandas = pandas + return True + except ImportError: + return False + + +# pandas used only for prototyping, will be shortly replaced with TorchArrow +def _with_pandas() -> bool: + global _WITH_PANDAS + if _WITH_PANDAS is None: + _WITH_PANDAS = _try_import_pandas() + return _WITH_PANDAS + + +class PandasWrapper: + @classmethod + def create_dataframe(cls, data, columns): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + return _pandas.DataFrame(data, columns=columns) # type: ignore[union-attr] + + @classmethod + def is_dataframe(cls, data): + if not _with_pandas(): + return False + return isinstance(data, _pandas.core.frame.DataFrame) # type: ignore[union-attr] + + @classmethod + def is_column(cls, data): + if not _with_pandas(): + return False + return isinstance(data, _pandas.core.series.Series) # type: ignore[union-attr] + + @classmethod + def iterate(cls, data): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + yield from data.itertuples(index=False) + + @classmethod + def concat(cls, buffer): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + return _pandas.concat(buffer) # type: ignore[union-attr] + + @classmethod + def get_item(cls, data, idx): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + return data[idx: idx + 1] + + @classmethod + def get_len(cls, df): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + return len(df.index) + + @classmethod + def get_columns(cls, df): + if not _with_pandas(): + raise Exception("DataFrames prototype requires pandas to function") + return list(df.columns.values.tolist()) + + +# When you build own implementation just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class) +default_wrapper = PandasWrapper + + +def get_df_wrapper(): + return default_wrapper + + +def set_df_wrapper(wrapper): + global default_wrapper + default_wrapper = wrapper + + +def create_dataframe(data, columns=None): + wrapper = get_df_wrapper() + return wrapper.create_dataframe(data, columns) + + +def is_dataframe(data): + wrapper = get_df_wrapper() + return wrapper.is_dataframe(data) + + +def get_columns(data): + wrapper = get_df_wrapper() + return wrapper.get_columns(data) + + +def is_column(data): + wrapper = get_df_wrapper() + return wrapper.is_column(data) + + +def concat(buffer): + wrapper = get_df_wrapper() + return wrapper.concat(buffer) + + +def iterate(data): + wrapper = get_df_wrapper() + return wrapper.iterate(data) + + +def get_item(data, idx): + wrapper = get_df_wrapper() + return wrapper.get_item(data, idx) + + +def get_len(df): + wrapper = get_df_wrapper() + return 
wrapper.get_len(df) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py new file mode 100644 index 0000000000000000000000000000000000000000..69a14e06fcbf7db40fe415fc70cf9c28cec3fc73 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py @@ -0,0 +1,433 @@ +from typing import Any, Dict, List, Optional + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe + +from torch.utils.data.datapipes.dataframe.structures import DataChunkDF + +# TODO(VitalyFedyunin): Add error when two different traces get combined + +__all__ = [ + "Capture", + "CaptureA", + "CaptureAdd", + "CaptureCall", + "CaptureControl", + "CaptureDataFrame", + "CaptureDataFrameWithDataPipeOps", + "CaptureF", + "CaptureGetAttr", + "CaptureGetItem", + "CaptureInitial", + "CaptureLikeMock", + "CaptureMul", + "CaptureSetItem", + "CaptureSub", + "CaptureVariable", + "CaptureVariableAssign", + "DataFrameTracer", + "DataFrameTracedOps", + "disable_capture", + "get_val", +] + + +def disable_capture(): + CaptureControl.disabled = True + + +class CaptureControl: + disabled = False + + +class DataFrameTracedOps(DFIterDataPipe): + def __init__(self, source_datapipe, output_var): + self.source_datapipe = source_datapipe + self.output_var = output_var + + def __iter__(self): + for item in self.source_datapipe: + yield self.output_var.apply_ops(item) + + +# TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registred functions +DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe', + 'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle'] + +UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding'] + + +class Capture: + # TODO: All operations are shared across entire InitialCapture, need to figure out what if we join two captures + + def __init__(self, schema_df=None): + self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df} + + def __str__(self): + return self._ops_str() + + def _ops_str(self): + res = "" + for op in self.ctx['operations']: + if len(res) > 0: + res += "\n" + res += str(op) + return res + + def __getstate__(self): + # TODO(VitalyFedyunin): Currently can't pickle (why?) 
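# Rough feel for the tracing idea: the operator hooks on `Capture` (defined just
# below) do not compute anything eagerly; they append a symbolic record of each
# operation to the shared `ctx`. A hypothetical interactive sketch of this
# prototype API:
#
#   ctx = {'operations': [], 'variables': [], 'schema_df': None}
#   a = CaptureVariable(None, ctx=ctx)   # gets a generated name, e.g. 'var_0'
#   b = a + 1                            # records e.g. 'var_1 = var_0 + 1'
#   print(b._ops_str())                  # textual trace of the recorded ops
#
# Nothing touches a real DataFrame until `apply_ops` replays the recorded trace
# against one.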
+ self.ctx['schema_df'] = None + for var in self.ctx['variables']: + var.calculated_value = None + state = {} + for item in self.__dict__: + state[item] = getattr(self, item) + return state + + def __setstate__(self, state): + for k, v in state.items(): + setattr(self, k, v) + + def __getattr__(self, attrname): + if attrname == 'kwarg' or attrname == 'kwargs': + raise Exception('no kwargs!') + if attrname in ['__deepcopy__']: + raise AttributeError() + result = CaptureGetAttr(self, attrname, ctx=self.ctx) + return result + + def __getitem__(self, key): + return CaptureGetItem(self, key, ctx=self.ctx) + + def __setitem__(self, key, value): + self.ctx['operations'].append( + CaptureSetItem(self, key, value, ctx=self.ctx)) + + def __add__(self, add_val): + res = CaptureAdd(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + self.ctx['operations'].append( + CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)) + return var + + def __sub__(self, add_val): + res = CaptureSub(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + self.ctx['operations'].append( + CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)) + return var + + def __mul__(self, add_val): + res = CaptureMul(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx) + self.ctx['operations'].append(t) + return var + + def _is_context_empty(self): + return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0 + + def apply_ops_2(self, dataframe): + # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer) + self.ctx['variables'][0].calculated_value = dataframe + for op in self.ctx['operations']: + op.execute() + + @property + def columns(self): + self.apply_ops_2(self.ctx['schema_df']) + value = self.execute() + return value.columns + + # TODO(VitalyFedyunin): Add tests + # TODO(VitalyFedyunin): Need to join context if one of them are empty because we used capture + + def __call__(self, *args, **kwargs): + # TODO: Check if args or kwargs have more than one different context + if self._is_context_empty(): + # TODO: Allow CaptureA to take context from mock + for arg in args: + if isinstance(arg, Capture) and not arg._is_context_empty(): + self.ctx = arg.ctx + break + if self._is_context_empty(): + for k, v in kwargs.items(): + if isinstance(k, Capture) and not k._is_context_empty(): + self.ctx = k.ctx + break + if isinstance(v, Capture) and not v._is_context_empty(): + self.ctx = v.ctx + break + + res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs) + var = CaptureVariable(None, ctx=self.ctx) + t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res) + self.ctx['operations'].append(t) + return var + + +class CaptureF(Capture): + def __init__(self, ctx=None, **kwargs): + if ctx is None: + self.ctx = {'operations': [], 'variables': []} + else: + self.ctx = ctx + self.kwargs = kwargs + + +class CaptureA(CaptureF): + def __str__(self): + return f"{self.kwargs['name']}" + + def execute(self): + value = self.kwargs['real_attribute'] + return value + + +class CaptureLikeMock: + def __init__(self, name): + import unittest.mock as mock + # TODO(VitalyFedyunin): Do not use provate function here, copy own implementation instead. 
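# Editor's note: the TODO above asks for a self-contained replacement for unittest.mock's
# private _get_target helper used on the next line. A minimal sketch, assuming plain dotted
# "pkg.module.attr" targets only; the name _resolve_target is hypothetical and not used by
# the surrounding code.
import importlib
import os.path  # only for the usage check at the bottom

def _resolve_target(name):
    target, attribute = name.rsplit('.', 1)
    parts = target.split('.')
    # Import the longest importable prefix, then walk the remaining attributes.
    module, idx = None, 0
    for idx in range(len(parts), 0, -1):
        try:
            module = importlib.import_module('.'.join(parts[:idx]))
            break
        except ImportError:
            continue
    if module is None:
        raise ImportError(f"Cannot resolve patch target {name!r}")
    obj = module
    for part in parts[idx:]:
        obj = getattr(obj, part)
    # Mirror mock's return shape: a zero-argument getter plus the attribute name.
    return (lambda obj=obj: obj), attribute

getter, attr = _resolve_target("os.path.join")
assert getattr(getter(), attr) is os.path.join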
+ get_target, attribute = mock._get_target(name) # type: ignore[attr-defined] + self.get_target = get_target + self.attribute = attribute + self.name = name + + def __enter__(self): + self.save = getattr(self.get_target(), self.attribute) + capt = CaptureA(name=self.name, real_attribute=self.save) + setattr(self.get_target(), self.attribute, capt) + + def __exit__(self, *exc_info): + setattr(self.get_target(), self.attribute, self.save) + + +class CaptureCall(Capture): + + def __init__(self, callable, ctx=None, **kwargs): + if ctx is None: + self.ctx = {'operations': [], 'variables': []} + else: + self.ctx = ctx + self.kwargs = kwargs + self.callable = callable + + def __str__(self): + return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs) + + def execute(self): + + # TODO: VitalyFedyunin execute kwargs and maybe nested structures + executed_args = [] + for arg in self.kwargs['args']: + if isinstance(arg, Capture): + executed_args.append(arg.execute()) + else: + executed_args.append(arg) + left = get_val(self.callable) + return left(*executed_args, **self.kwargs['kwargs']) + + +class CaptureVariableAssign(CaptureF): + def __str__(self): + variable = self.kwargs['variable'] + value = self.kwargs['value'] + return f"{variable} = {value}" + + def execute(self): + self.kwargs['variable'].calculated_value = self.kwargs['value'].execute() + + +class CaptureVariable(Capture): + # TODO(VitalyFedyunin): This should be atomic and thread safe + names_idx = 0 + + def __init__(self, value, ctx): + if CaptureControl.disabled: + raise Exception('Attempting to create capture variable with capture off') + self.ctx = ctx + self.value = value + self.name = f'var_{CaptureVariable.names_idx}' + CaptureVariable.names_idx += 1 + self.ctx['variables'].append(self) + + def __str__(self): + return self.name + + def execute(self): + return self.calculated_value + + def apply_ops(self, dataframe): + # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer) + self.ctx['variables'][0].calculated_value = dataframe + for op in self.ctx['operations']: + op.execute() + return self.calculated_value + + +class CaptureGetItem(Capture): + def __init__(self, left, key, ctx): + self.ctx = ctx + self.left = left + self.key = key + + def __str__(self): + return f"{self.left}[{get_val(self.key)}]" + + def execute(self): + left = self.left.execute() + return left[self.key] + + +class CaptureSetItem(Capture): + def __init__(self, left, key, value, ctx): + self.ctx = ctx + self.left = left + self.key = key + self.value = value + + def __str__(self): + return f"{self.left}[{get_val(self.key)}] = {self.value}" + + def execute(self): + left = self.left.execute() + value = self.value.execute() + left[self.key] = value + + +class CaptureAdd(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} + {self.right}" + + def execute(self): + return get_val(self.left) + get_val(self.right) + + +class CaptureMul(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} * {self.right}" + + def execute(self): + return get_val(self.left) * get_val(self.right) + + +class CaptureSub(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} - {self.right}" + + def execute(self): + return get_val(self.left) - 
get_val(self.right) + + +class CaptureGetAttr(Capture): + def __init__(self, src, name, ctx): + self.ctx = ctx + self.src = src + self.name = name + + def __str__(self): + return f"{self.src}.{self.name}" + + def execute(self): + val = get_val(self.src) + return getattr(val, self.name) + + +def get_val(capture): + if isinstance(capture, Capture): + return capture.execute() + elif isinstance(capture, str): + return f'"{capture}"' + else: + return capture + + +class CaptureInitial(CaptureVariable): + def __init__(self, schema_df=None): + new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df} + super().__init__(None, new_ctx) + self.name = f'input_{self.name}' + + +class CaptureDataFrame(CaptureInitial): + pass + + +class CaptureDataFrameWithDataPipeOps(CaptureDataFrame): + def as_datapipe(self): + return DataFrameTracedOps( + self.ctx['variables'][0].source_datapipe, self) + + def raw_iterator(self): + return self.as_datapipe().__iter__() + + def __iter__(self): + return iter(self._dataframes_as_tuples()) + + def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF): + dp = self._dataframes_per_row()._dataframes_concat(batch_size) + dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class) + dp._dp_contains_dataframe = True + return dp + + def groupby(self, + group_key_fn, + *, + buffer_size=10000, + group_size=None, + guaranteed_group_size=None, + drop_remaining=False): + dp = self._dataframes_per_row() + dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size, + guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining) + return dp + + def shuffle(self, *args, **kwargs): + return self._dataframes_shuffle(*args, **kwargs) + + def filter(self, *args, **kwargs): + return self._dataframes_filter(*args, **kwargs) + + def collate(self, *args, **kwargs): + raise Exception("Can't collate unbatched DataFrames stream") + + def __getattr__(self, attrname): # ? 
+ if attrname in UNIMPLEMENTED_ATTR: + raise AttributeError('Attempting to get ', attrname) + if attrname in DATAPIPES_OPS: + return (self.as_datapipe()).__getattr__(attrname) + return super().__getattr__(attrname) + + +@functional_datapipe('trace_as_dataframe') +class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe): # type: ignore[misc] + source_datapipe: Optional[Any] = None + + # TODO(VitalyFedyunin): Must implement all special functions of datapipes + + def set_shuffle_settings(self, *args, **kwargs): + pass + + def is_shardable(self): + return False + + def __init__(self, source_datapipe, schema_df=None): + self.source_datapipe = source_datapipe + if schema_df is None: + schema_df = next(iter(self.source_datapipe)) + super().__init__(schema_df=schema_df) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py new file mode 100644 index 0000000000000000000000000000000000000000..a75cc5c7a7c210d67cbc6291dcf892576669eb2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py @@ -0,0 +1,131 @@ +import random + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe + +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper + +__all__ = [ + "ConcatDataFramesPipe", + "DataFramesAsTuplesPipe", + "ExampleAggregateAsDataFrames", + "FilterDataFramesPipe", + "PerRowDataFramesPipe", + "ShuffleDataFramesPipe", +] + + +@functional_datapipe('_dataframes_as_tuples') +class DataFramesAsTuplesPipe(IterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + for df in self.source_datapipe: + # for record in df.to_records(index=False): + yield from df_wrapper.iterate(df) + + +@functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True) +class PerRowDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + for df in self.source_datapipe: + # TODO(VitalyFedyunin): Replacing with TorchArrow only API, as we are dropping pandas as followup + for i in range(len(df)): + yield df[i:i + 1] + + +@functional_datapipe('_dataframes_concat', enable_df_api_tracing=True) +class ConcatDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe, batch=3): + self.source_datapipe = source_datapipe + self.n_batch = batch + + def __iter__(self): + buffer = [] + for df in self.source_datapipe: + buffer.append(df) + if len(buffer) == self.n_batch: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True) +class ShuffleDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + size = None + all_buffer = [] + for df in self.source_datapipe: + if size is None: + size = df_wrapper.get_len(df) + for i in range(df_wrapper.get_len(df)): + all_buffer.append(df_wrapper.get_item(df, i)) + random.shuffle(all_buffer) + buffer = [] + for df in all_buffer: + buffer.append(df) + if len(buffer) == size: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_dataframes_filter', enable_df_api_tracing=True) +class 
FilterDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe, filter_fn): + self.source_datapipe = source_datapipe + self.filter_fn = filter_fn + + def __iter__(self): + size = None + all_buffer = [] + filter_res = [] + for df in self.source_datapipe: + if size is None: + size = len(df.index) + for i in range(len(df.index)): + all_buffer.append(df[i:i + 1]) + filter_res.append(self.filter_fn(df.iloc[i])) + + buffer = [] + for df, res in zip(all_buffer, filter_res): + if res: + buffer.append(df) + if len(buffer) == size: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True) +class ExampleAggregateAsDataFrames(DFIterDataPipe): + def __init__(self, source_datapipe, dataframe_size=10, columns=None): + self.source_datapipe = source_datapipe + self.columns = columns + self.dataframe_size = dataframe_size + + def _as_list(self, item): + try: + return list(item) + except Exception: # TODO(VitalyFedyunin): Replace with better iterable exception + return [item] + + def __iter__(self): + aggregate = [] + for item in self.source_datapipe: + aggregate.append(self._as_list(item)) + if len(aggregate) == self.dataframe_size: + yield df_wrapper.create_dataframe(aggregate, columns=self.columns) + aggregate = [] + if len(aggregate) > 0: + yield df_wrapper.create_dataframe(aggregate, columns=self.columns) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..507a04e491d30e36b1f4bbec2a676efc53db7194 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py @@ -0,0 +1,18 @@ +from torch.utils.data.datapipes.datapipe import DataChunk +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper + +__all__ = ["DataChunkDF", ] + + +class DataChunkDF(DataChunk): + """DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`.""" + + def __iter__(self): + for df in self.items: + yield from df_wrapper.iterate(df) + + def __len__(self): + total_len = 0 + for df in self.items: + total_len += df_wrapper.get_len(df) + return total_len diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py new file mode 100644 index 0000000000000000000000000000000000000000..c6d9baf95ae7dac626dae135305f21454aff96d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py @@ -0,0 +1,404 @@ +import functools +import pickle +from typing import Dict, Callable, Optional, TypeVar, Generic, Iterator + +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes.utils.common import ( + _deprecation_warning, + _iter_deprecated_functional_names, + _map_deprecated_functional_names, +) +from torch.utils.data.dataset import Dataset, IterableDataset +from torch.utils._import_utils import import_dill + +dill = import_dill() +HAS_DILL = dill is not None + +__all__ = [ + "DataChunk", + "DFIterDataPipe", + "IterDataPipe", + "MapDataPipe", +] + +T = TypeVar('T') +T_co = TypeVar('T_co', covariant=True) + +UNTRACABLE_DATAFRAME_PIPES = ['batch', # 
As it returns DataChunks + 'groupby', # As it returns DataChunks + '_dataframes_as_tuples', # As it unpacks DF + 'trace_as_dataframe', # As it used to mark DF for tracing + ] + + +class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): + r""" + Iterable-style DataPipe. + + All DataPipes that represent an iterable of data samples should subclass this. + This style of DataPipes is particularly useful when data come from a stream, or + when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its + elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``. + + All subclasses should overwrite :meth:`__iter__`, which would return an + iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its + method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should + override ``reset()`` if necessary. The common usages include resetting buffers, pointers, + and various state variables within the custom ``IterDataPipe``. + + Note: + Only `one` iterator can be valid for each ``IterDataPipe`` at a time, + and the creation a second iterator will invalidate the first one. This constraint is necessary because + some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators. + The code example below presents details on how this constraint looks in practice. + If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_. + + These DataPipes can be invoked in two ways, using the class constructor or applying their + functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes). + You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple + operations in succession. + + .. _GitHub IterDataPipe Single Iterator Issue: + https://github.com/pytorch/data/issues/45 + + Note: + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the DataPipe object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. 
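# Editor's note: a minimal sketch (hypothetical class name, not part of the upstream
# docstring) of the per-worker configuration pattern described above: get_worker_info()
# is consulted inside __iter__ so each DataLoader worker yields a disjoint shard instead
# of duplicating the stream.
from torch.utils.data import DataLoader, IterDataPipe, get_worker_info

class ShardedRangePipe(IterDataPipe):
    def __init__(self, n):
        self.n = n

    def __iter__(self):
        info = get_worker_info()
        worker_id = 0 if info is None else info.id
        num_workers = 1 if info is None else info.num_workers
        # Stride over the range so the union of all workers covers it exactly once.
        yield from range(worker_id, self.n, num_workers)

loader = DataLoader(ShardedRangePipe(8), batch_size=None)
print(list(loader))  # [0, 1, ..., 7]; with num_workers=2 the workers yield 0,2,4,6 and 1,3,5,7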
+ + Examples: + General Usage: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor + >>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0) + >>> list(filter_dp) + [2, 4, 6, 8, 10] + Single Iterator Constraint Example: + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> source_dp = IterableWrapper(range(10)) + >>> it1 = iter(source_dp) + >>> list(it1) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> it1 = iter(source_dp) + >>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1` + >>> next(it2) + 0 + >>> next(it1) # Further usage of `it1` will raise a `RunTimeError` + """ + + functions: Dict[str, Callable] = {} + reduce_ex_hook: Optional[Callable] = None + getstate_hook: Optional[Callable] = None + str_hook: Optional[Callable] = None + repr_hook: Optional[Callable] = None + _valid_iterator_id: Optional[int] = None + _number_of_samples_yielded: int = 0 + _snapshot_state: _SnapshotState = _SnapshotState.NotStarted + _fast_forward_iterator: Optional[Iterator] = None + + def __iter__(self) -> Iterator[T_co]: + return self + + def __getattr__(self, attribute_name): + if attribute_name in IterDataPipe.functions: + if attribute_name in _iter_deprecated_functional_names: + kwargs = _iter_deprecated_functional_names[attribute_name] + _deprecation_warning(**kwargs) + f = IterDataPipe.functions[attribute_name] + function = functools.partial(f, self) + functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",)) + return function + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}") + + @classmethod + def register_function(cls, function_name, function): + cls.functions[function_name] = function + + @classmethod + def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False): + if function_name in cls.functions: + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + + def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs): + result_pipe = cls(source_dp, *args, **kwargs) + if isinstance(result_pipe, IterDataPipe): + if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe): + if function_name not in UNTRACABLE_DATAFRAME_PIPES: + result_pipe = result_pipe.trace_as_dataframe() + + return result_pipe + + function = functools.partial( + class_function, cls_to_register, enable_df_api_tracing + ) + functools.update_wrapper( + wrapper=function, wrapped=cls_to_register, assigned=("__doc__",) + ) + cls.functions[function_name] = function + + def __getstate__(self): + """ + Serialize `lambda` functions when `dill` is available. + + If this doesn't cover your custom DataPipe's use case, consider writing custom methods for + `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization. 
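# Editor's note: an illustrative sketch (not upstream code) of the escape hatch mentioned
# above: a DataPipe holding an unpicklable open file handle overrides __getstate__ and
# __setstate__ so only the path is serialized and the handle is reopened on load. The
# class name LineReaderPipe is hypothetical.
from torch.utils.data import IterDataPipe

class LineReaderPipe(IterDataPipe):
    def __init__(self, path):
        self.path = path
        self._fh = open(path, "r")

    def __iter__(self):
        self._fh.seek(0)
        yield from (line.rstrip("\n") for line in self._fh)

    def __getstate__(self):
        # Drop the unpicklable file object; keep only what is needed to rebuild it.
        return {"path": self.path}

    def __setstate__(self, state):
        self.path = state["path"]
        self._fh = open(self.path, "r")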
+ """ + state = self.__dict__ + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __reduce_ex__(self, *args, **kwargs): + if IterDataPipe.reduce_ex_hook is not None: + try: + return IterDataPipe.reduce_ex_hook(self) + except NotImplementedError: + pass + return super().__reduce_ex__(*args, **kwargs) + + @classmethod + def set_getstate_hook(cls, hook_fn): + if IterDataPipe.getstate_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing getstate_hook") + IterDataPipe.getstate_hook = hook_fn + + @classmethod + def set_reduce_ex_hook(cls, hook_fn): + if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing reduce_ex_hook") + IterDataPipe.reduce_ex_hook = hook_fn + + def __repr__(self): + if self.repr_hook is not None: + return self.repr_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __str__(self): + if self.str_hook is not None: + return self.str_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __dir__(self): + # for auto-completion in a REPL (e.g. Jupyter notebook) + return list(super().__dir__()) + list(self.functions.keys()) + + def reset(self) -> None: + r""" + Reset the `IterDataPipe` to the initial state. + + By default, no-op. For subclasses of `IterDataPipe`, depending on their functionalities, + they may want to override this method with implementations that + may clear the buffers and reset pointers of the DataPipe. + The `reset` method is always called when `__iter__` is called as part of `hook_iterator`. + """ + pass + + +class DFIterDataPipe(IterDataPipe): + def _is_dfpipe(self): + return True + + +class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): + r""" + Map-style DataPipe. + + All datasets that represent a map from keys to data samples should subclass this. + Subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given, unique key. Subclasses can also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. + + These DataPipes can be invoked in two ways, using the class constructor or applying their + functional form onto an existing `MapDataPipe` (recommend, available to most but not all DataPipes). + + Note: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + DataPipe with non-integral indices/keys, a custom sampler must be provided. 
+ + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> batch_dp = map_dp_1.batch(batch_size=2) + >>> list(batch_dp) + [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + """ + + functions: Dict[str, Callable] = {} + reduce_ex_hook: Optional[Callable] = None + getstate_hook: Optional[Callable] = None + str_hook: Optional[Callable] = None + repr_hook: Optional[Callable] = None + + def __getattr__(self, attribute_name): + if attribute_name in MapDataPipe.functions: + if attribute_name in _map_deprecated_functional_names: + kwargs = _map_deprecated_functional_names[attribute_name] + _deprecation_warning(**kwargs) + f = MapDataPipe.functions[attribute_name] + function = functools.partial(f, self) + functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",)) + return function + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}") + + @classmethod + def register_function(cls, function_name, function): + cls.functions[function_name] = function + + @classmethod + def register_datapipe_as_function(cls, function_name, cls_to_register): + if function_name in cls.functions: + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + + def class_function(cls, source_dp, *args, **kwargs): + result_pipe = cls(source_dp, *args, **kwargs) + return result_pipe + + function = functools.partial(class_function, cls_to_register) + functools.update_wrapper( + wrapper=function, wrapped=cls_to_register, assigned=("__doc__",) + ) + cls.functions[function_name] = function + + def __getstate__(self): + """ + Serialize `lambda` functions when `dill` is available. + + If this doesn't cover your custom DataPipe's use case, consider writing custom methods for + `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization. + """ + state = self.__dict__ + if MapDataPipe.getstate_hook is not None: + return MapDataPipe.getstate_hook(state) + return state + + def __reduce_ex__(self, *args, **kwargs): + if MapDataPipe.reduce_ex_hook is not None: + try: + return MapDataPipe.reduce_ex_hook(self) + except NotImplementedError: + pass + return super().__reduce_ex__(*args, **kwargs) + + @classmethod + def set_getstate_hook(cls, hook_fn): + if MapDataPipe.getstate_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing getstate_hook") + MapDataPipe.getstate_hook = hook_fn + + @classmethod + def set_reduce_ex_hook(cls, hook_fn): + if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing reduce_ex_hook") + MapDataPipe.reduce_ex_hook = hook_fn + + def __repr__(self): + if self.repr_hook is not None: + return self.repr_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __str__(self): + if self.str_hook is not None: + return self.str_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __dir__(self): + # for auto-completion in a REPL (e.g. 
Jupyter notebook) + return list(super().__dir__()) + list(self.functions.keys()) + + + +class _DataPipeSerializationWrapper: + def __init__(self, datapipe): + self._datapipe = datapipe + + def __getstate__(self): + use_dill = False + try: + value = pickle.dumps(self._datapipe) + except Exception: + if HAS_DILL: + value = dill.dumps(self._datapipe) + use_dill = True + else: + raise + return (value, use_dill) + + def __setstate__(self, state): + value, use_dill = state + if use_dill: + self._datapipe = dill.loads(value) + else: + self._datapipe = pickle.loads(value) + + def __len__(self): + try: + return len(self._datapipe) + except Exception as e: + raise TypeError( + f"{type(self).__name__} instance doesn't have valid length" + ) from e + + +class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): + def __init__(self, datapipe: IterDataPipe[T_co]): + super().__init__(datapipe) + self._datapipe_iter: Optional[Iterator[T_co]] = None + + def __iter__(self) -> "_IterDataPipeSerializationWrapper": + self._datapipe_iter = iter(self._datapipe) + return self + + def __next__(self) -> T_co: # type: ignore[type-var] + assert self._datapipe_iter is not None + return next(self._datapipe_iter) + + +class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): + def __getitem__(self, idx): + return self._datapipe[idx] + + +class DataChunk(list, Generic[T]): + def __init__(self, items): + super().__init__(items) + self.items = items + + def as_str(self, indent=''): + res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" + return res + + def __iter__(self) -> Iterator[T]: + yield from super().__iter__() + + def raw_iterator(self) -> T: # type: ignore[misc] + yield from self.items diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi new file mode 100644 index 0000000000000000000000000000000000000000..34e80bcb95f5e487c8fd7c9e8dcb01db56307bc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi @@ -0,0 +1,689 @@ +# This base template ("datapipe.pyi.in") is generated from mypy stubgen with minimal editing for code injection +# The output file will be "datapipe.pyi". This is executed as part of torch/CMakeLists.txt +# Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other +# classes/objects here, even though we are not injecting extra code into them at the moment. + +from typing import Any, Callable, Dict, Generic, Iterator, List, Literal, Optional, TypeVar, Union + +from torch.utils.data import Dataset, default_collate, IterableDataset +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +UNTRACABLE_DATAFRAME_PIPES: Any + +class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + ): ... + def __getstate__(self): ... 
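# Editor's note: a standalone sketch (not upstream code) of the serialization strategy used
# by _DataPipeSerializationWrapper in datapipe.py above: try pickle first and fall back to
# dill, when installed, for payloads such as lambdas that pickle rejects. Helper names are
# hypothetical.
import pickle

try:
    import dill
except ImportError:
    dill = None

def dumps_with_fallback(obj):
    try:
        return pickle.dumps(obj), False        # (payload, used_dill)
    except Exception:
        if dill is None:
            raise
        return dill.dumps(obj), True

def loads_with_fallback(payload, used_dill):
    return dill.loads(payload) if used_dill else pickle.loads(payload)

payload, used_dill = dumps_with_fallback(lambda x: x + 1)  # the lambda round-trip needs dill
fn = loads_with_fallback(payload, used_dill)
assert fn(2) == 3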
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherMapDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> MapDataPipe: + r""" + Create mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, + or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``. + + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> batch_dp = dp.batch(batch_size=2) + >>> list(batch_dp) + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] + """ + + # Functional form of 'ConcaterMapDataPipe' + def concat(self, *datapipes: MapDataPipe) -> MapDataPipe: + r""" + Concatenate multiple Map DataPipes (functional name: ``concat``). + + The new index of is the cumulative sum of source DataPipes. + For example, if there are 2 source DataPipes both with length 5, + index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to + elements of the first DataPipe, and 5 to 9 would refer to elements + of the second DataPipe. + + Args: + datapipes: Map DataPipes being concatenated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(3)) + >>> concat_dp = dp1.concat(dp2) + >>> list(concat_dp) + [0, 1, 2, 0, 1, 2] + """ + + # Functional form of 'MapperMapDataPipe' + def map(self, fn: Callable= ...) -> MapDataPipe: + r""" + Apply the input function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source MapDataPipe + fn: Function being applied to each item + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, indices: Optional[List] = None) -> IterDataPipe: + r""" + Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``). + + When it is used with :class:`~torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed + for each worker process. + + Args: + datapipe: MapDataPipe being shuffled + indices: a list of indices of the MapDataPipe. 
If not provided, we assume it uses 0-based indexing + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> shuffle_dp = dp.shuffle().set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + >>> list(shuffle_dp) + [6, 1, 9, 5, 2, 4, 7, 3, 8, 0] + >>> # Reset seed for Shuffler + >>> shuffle_dp = shuffle_dp.set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + + Note: + Even thought this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it would return an + ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be non-sensitive to + the order of data order for the sake of random reads, but ``IterDataPipe`` depends on the order + of data during data-processing. + """ + + # Functional form of 'ZipperMapDataPipe' + def zip(self, *datapipes: MapDataPipe[T_co]) -> MapDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + This MataPipe is out of bound as soon as the shortest input DataPipe is exhausted. + + Args: + *datapipes: Map DataPipes being aggregated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(10, 13)) + >>> zip_dp = dp1.zip(dp2) + >>> list(zip_dp) + [(0, 10), (1, 11), (2, 12)] + """ + + +class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + _number_of_samples_yielded: int = ... + _snapshot_state: _SnapshotState = _SnapshotState.Iterating + _fast_forward_iterator: Optional[Iterator] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + enable_df_api_tracing: bool = ..., + ): ... + def __getstate__(self): ... + def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherIterDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> IterDataPipe: + r""" + Creates mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the + last batch if ``drop_last`` is set to ``False``. 
+ + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding, + defaults to ``DataChunk`` + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> dp = dp.batch(batch_size=3, drop_last=True) + >>> list(dp) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + # Functional form of 'CollatorIterDataPipe' + def collate(self, conversion: Optional[Union[Callable[..., Any],Dict[Union[str, Any], Union[Callable, Any]],]] = default_collate, collate_fn: Optional[Callable] = None) -> IterDataPipe: + r""" + Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``). + + By default, it uses :func:`torch.utils.data.default_collate`. + + .. note:: + While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the + default behavior and `functools.partial` to specify any additional arguments. + + Args: + datapipe: Iterable DataPipe being collated + collate_fn: Customized collate function to collect and combine data or a batch of data. + Default function collates to Tensor(s) based on data type. + + Example: + >>> # xdoctest: +SKIP + >>> # Convert integer data to float Tensor + >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): + ... def __init__(self, start, end): + ... super(MyIterDataPipe).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + ... def __len__(self): + ... return self.end - self.start + ... + >>> ds = MyIterDataPipe(start=3, end=7) + >>> print(list(ds)) + [3, 4, 5, 6] + >>> def collate_fn(batch): + ... return torch.tensor(batch, dtype=torch.float) + ... + >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) + >>> print(list(collated_ds)) + [tensor(3.), tensor(4.), tensor(5.), tensor(6.)] + """ + + # Functional form of 'ConcaterIterDataPipe' + def concat(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Concatenates multiple Iterable DataPipes (functional name: ``concat``). + + The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. + + Args: + datapipes: Iterable DataPipes being concatenated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> import random + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1 = IterableWrapper(range(3)) + >>> dp2 = IterableWrapper(range(5)) + >>> list(dp1.concat(dp2)) + [0, 1, 2, 0, 1, 2, 3, 4] + """ + + # Functional form of 'DemultiplexerIterDataPipe' + def demux(self, num_instances: int, classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000) -> List[IterDataPipe]: + r""" + Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``). + + A list of the child DataPipes is returned from this operation. 
+ + Args: + datapipe: Iterable DataPipe being filtered + num_instances: number of instances of the DataPipe to create + classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None`` + drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None`` + buffer_size: this defines the maximum number of inputs that the buffer can hold across all child + DataPipes while waiting for their values to be yielded. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + + Examples: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def odd_or_even(n): + ... return n % 2 + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even) + >>> list(dp1) + [0, 2, 4] + >>> list(dp2) + [1, 3] + >>> # It can also filter out any element that gets `None` from the `classifier_fn` + >>> def odd_or_even_no_zero(n): + ... return n % 2 if n != 0 else None + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True) + >>> list(dp1) + [2, 4] + >>> list(dp2) + [1, 3] + """ + + # Functional form of 'FilterIterDataPipe' + def filter(self, filter_fn: Callable, input_col=None) -> IterDataPipe: + r""" + Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). + + Args: + datapipe: Iterable DataPipe being filtered + filter_fn: Customized function mapping an element to a boolean. + input_col: Index or indices of data which ``filter_fn`` is applied, such as: + + - ``None`` as default to apply ``filter_fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def is_even(n): + ... return n % 2 == 0 + >>> dp = IterableWrapper(range(5)) + >>> filter_dp = dp.filter(filter_fn=is_even) + >>> list(filter_dp) + [0, 2, 4] + """ + + # Functional form of 'ForkerIterDataPipe' + def fork(self, num_instances: int, buffer_size: int = 1000, copy: Optional[Literal["shallow", "deep"]] = None) -> List[IterDataPipe]: + r""" + Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``). + + Args: + datapipe: Iterable DataPipe being copied + num_instances: number of instances of the datapipe to create + buffer_size: this restricts how far ahead the leading child DataPipe + can read relative to the slowest child DataPipe. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + copy: copy strategy to use for items yielded by each branch. Supported + options are ``None`` for no copying, ``"shallow"`` for shallow object + copies, and ``"deep"`` for deep object copies. Defaults to ``None``. + + Note: + All branches of the forked pipeline return the identical object unless + the copy parameter is supplied. If the object is mutable or contains + mutable objects, changing them in one branch will affect all others. 
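# Editor's note: a hedged illustration (not upstream) of the note above, assuming the
# torchdata IterableWrapper datapipe is available: without a copy strategy both fork
# branches hand out the same objects, so a mutation made while draining one branch is
# visible from the other.
from torchdata.datapipes.iter import IterableWrapper

source = IterableWrapper([{"v": 0}, {"v": 1}])
left, right = source.fork(num_instances=2)      # copy=None: branches share the same dicts
for item in left:
    item["v"] += 100                            # mutate while draining the left branch
print([item["v"] for item in right])            # [100, 101] - the right branch sees the change
# With source.fork(num_instances=2, copy="deep"), each branch would receive its own copies.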
+ + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.fork(num_instances=2) + >>> list(dp1) + [0, 1, 2, 3, 4] + >>> list(dp2) + [0, 1, 2, 3, 4] + """ + + # Functional form of 'GrouperIterDataPipe' + def groupby(self, group_key_fn: Callable[[T_co], Any], *, keep_key: bool = False, buffer_size: int = 10000, group_size: Optional[int] = None, guaranteed_group_size: Optional[int] = None, drop_remaining: bool = False) -> IterDataPipe: + r""" + Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``. + + (functional name: ``groupby``). + + The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group + will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full, + the DataPipe will yield the largest batch with the same key, provided that its size is larger + than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``. + + After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity + will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``. + + Args: + datapipe: Iterable datapipe to be grouped + group_key_fn: Function used to generate group key from the data of the source datapipe + keep_key: Option to yield the matching key along with the items in a tuple, + resulting in `(key, [items])` otherwise returning [items] + buffer_size: The size of buffer for ungrouped data + group_size: The max size of each group, a batch is yielded as soon as it reaches this size + guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full + drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer + when the buffer is full + + Example: + >>> import os + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def group_fn(file): + ... return os.path.basename(file).split(".")[0] + >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"]) + >>> dp0 = source_dp.groupby(group_key_fn=group_fn) + >>> list(dp0) + [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']] + >>> # A group is yielded as soon as its size equals to `group_size` + >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2) + >>> list(dp1) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size` + >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2) + >>> list(dp2) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + """ + + # Functional form of 'FileListerIterDataPipe' + def list_files(self, masks: Union[str, List[str]] = '', *, recursive: bool = False, abspath: bool = False, non_deterministic: bool = False, length: int = -1) -> IterDataPipe: + r""" + Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. + + Multiple root directories can be provided (functional name: ``list_files``). 
+ + Args: + root: Root directory or a sequence of root directories + masks: Unix style filter string or string list for filtering file name(s) + recursive: Whether to return pathname from nested directories or not + abspath: Whether to return relative pathname or absolute pathname + non_deterministic: Whether to return pathname in sorted order or not. + If ``False``, the results yielded from each root directory will be sorted + length: Nominal length of the datapipe + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister + >>> dp = FileLister(root=".", recursive=True) + >>> list(dp) + ['example.py', './data/data.tar'] + """ + + # Functional form of 'MapperIterDataPipe' + def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe: + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'MultiplexerIterDataPipe' + def mux(self, *datapipes) -> IterDataPipe: + r""" + Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). + + As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, + and so on. It ends when the shortest input DataPipe is exhausted. + + Args: + datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.mux(dp2, dp3)) + [0, 10, 20, 1, 11, 21, 2, 12, 22] + """ + + # Functional form of 'FileOpenerIterDataPipe' + def open_files(self, mode: str = 'r', encoding: Optional[str] = None, length: int = -1) -> IterDataPipe: + r""" + Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``). 
+ + Args: + datapipe: Iterable datapipe that provides pathnames + mode: An optional string that specifies the mode in which + the file is opened by ``open()``. It defaults to ``r``, other options are + ``b`` for reading in binary mode and ``t`` for text mode. + encoding: An optional string that specifies the encoding of the + underlying file. It defaults to ``None`` to match the default encoding of ``open``. + length: Nominal length of the datapipe + + Note: + The opened file handles will be closed by Python's GC periodically. Users can choose + to close them explicitly. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader + >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt')) + >>> dp = FileOpener(dp) + >>> dp = StreamReader(dp) + >>> list(dp) + [('./abc.txt', 'abc')] + """ + + # Functional form of 'StreamReaderIterDataPipe' + def read_from_stream(self, chunk=None) -> IterDataPipe: + r""" + Given IO streams and their label names, yield bytes with label name as tuple. + + (functional name: ``read_from_stream``). + + Args: + datapipe: Iterable DataPipe provides label/URL and byte stream + chunk: Number of bytes to be read from stream per iteration. + If ``None``, all bytes will be read until the EOF. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader + >>> from io import StringIO + >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))]) + >>> list(StreamReader(dp, chunk=1)) + [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')] + """ + + # Functional form of 'RoutedDecoderIterDataPipe' + def routed_decode(self, *handlers: Callable, key_fn: Callable= ...) -> IterDataPipe: + r""" + Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple. + + (functional name: ``routed_decode``) + + Args: + datapipe: Iterable datapipe that provides pathname and binary stream in tuples + handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder + handlers will be set as default. If multiple handles are provided, the priority + order follows the order of handlers (the first handler has the top priority) + key_fn: Function for decoder to extract key from pathname to dispatch handlers. + Default is set to extract file extension from pathname + + Note: + When ``key_fn`` is specified returning anything other than extension, the default + handler will not work and users need to specify custom handler. Custom handler + could use regex to determine the eligibility to handle data. + """ + + # Functional form of 'ShardingFilterIterDataPipe' + def sharding_filter(self, sharding_group_filter=None) -> IterDataPipe: + r""" + Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``). + + After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the + original DataPipe, where `n` equals to the number of instances. + + Args: + source_datapipe: Iterable DataPipe that will be sharded + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, buffer_size: int = 10000, unbatch_level: int = 0) -> IterDataPipe: + r""" + Shuffle the input DataPipe with a buffer (functional name: ``shuffle``). + + The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then, + each item will be yielded from the buffer by reservoir sampling via iterator. 
+ + ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the + datapipe is not shuffled. In order to fully shuffle all elements from datapipe, + ``buffer_size`` is required to be greater than or equal to the size of datapipe. + + When it is used with :class:`torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), `worker_init_fn` is used to set up a random seed + for each worker process. + + Args: + datapipe: The IterDataPipe being shuffled + buffer_size: The buffer size for shuffling (default to ``10000``) + unbatch_level: Specifies if it is necessary to unbatch source data before + applying the shuffle + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> shuffle_dp = dp.shuffle() + >>> list(shuffle_dp) + [0, 4, 1, 6, 3, 2, 9, 5, 7, 8] + """ + + # Functional form of 'UnBatcherIterDataPipe' + def unbatch(self, unbatch_level: int = 1) -> IterDataPipe: + r""" + Undos batching of data (functional name: ``unbatch``). + + In other words, it flattens the data up to the specified level within a batched DataPipe. + + Args: + datapipe: Iterable DataPipe being un-batched + unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``, + it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]]) + >>> dp1 = source_dp.unbatch() + >>> list(dp1) + [[0, 1], [2], [3, 4], [5], [6]] + >>> dp2 = source_dp.unbatch(unbatch_level=2) + >>> list(dp2) + [0, 1, 2, 3, 4, 5, 6] + """ + + # Functional form of 'ZipperIterDataPipe' + def zip(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + The output is stopped as soon as the shortest input DataPipe is exhausted. + + Args: + *datapipes: Iterable DataPipes being aggregated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.zip(dp2, dp3)) + [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)] + """ + + +class DFIterDataPipe(IterDataPipe): + def _is_dfpipe(self): ... + def __iter__(self): ... + +class _DataPipeSerializationWrapper: + def __init__(self, datapipe): ... + def __getstate__(self): ... + def __setstate__(self, state): ... + def __len__(self): ... + +class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): + def __iter__(self): ... + +class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): + def __getitem__(self, idx): ... 
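# Editor's note: a short usage sketch (not part of the generated stub file) chaining several
# of the functional forms declared above on a built-in wrapper pipe; named functions are used
# instead of lambdas, as the docstrings recommend for picklability.
from torch.utils.data.datapipes.iter import IterableWrapper

def double(x):
    return x * 2

def divisible_by_four(x):
    return x % 4 == 0

dp = IterableWrapper(range(10))
dp = dp.map(double)                            # MapperIterDataPipe via its functional form
dp = dp.filter(divisible_by_four)              # FilterIterDataPipe
dp = dp.batch(batch_size=2, drop_last=False)   # BatcherIterDataPipe, yields DataChunk lists
print(list(dp))                                # [[0, 4], [8, 12], [16]]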
+ +class DataChunk(list, Generic[T]): + def __init__(self, items): + super().__init__(items) + self.items = items + def as_str(self, indent: str = "") -> str: + res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" + return res + def __iter__(self) -> Iterator[T]: + yield from super().__iter__() + def raw_iterator(self) -> T: # type: ignore[misc] + yield from self.items diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f8a801bd0763d206926d50bb513be6b9d3135d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py @@ -0,0 +1,246 @@ +import os +import pathlib +from collections import defaultdict +from typing import Any, Dict, List, Set, Tuple, Union + + +def materialize_lines(lines: List[str], indentation: int) -> str: + output = "" + new_line_with_indent = "\n" + " " * indentation + for i, line in enumerate(lines): + if i != 0: + output += new_line_with_indent + output += line.replace('\n', new_line_with_indent) + return output + + +def gen_from_template(dir: str, template_name: str, output_name: str, replacements: List[Tuple[str, Any, int]]): + + template_path = os.path.join(dir, template_name) + output_path = os.path.join(dir, output_name) + + with open(template_path) as f: + content = f.read() + for placeholder, lines, indentation in replacements: + with open(output_path, "w") as f: + content = content.replace(placeholder, materialize_lines(lines, indentation)) + f.write(content) + + +def find_file_paths(dir_paths: List[str], files_to_exclude: Set[str]) -> Set[str]: + """ + When given a path to a directory, returns the paths to the relevant files within it. + + This function does NOT recursive traverse to subdirectories. 
+ """ + paths: Set[str] = set() + for dir_path in dir_paths: + all_files = os.listdir(dir_path) + python_files = {fname for fname in all_files if ".py" == fname[-3:]} + filter_files = {fname for fname in python_files if fname not in files_to_exclude} + paths.update({os.path.join(dir_path, fname) for fname in filter_files}) + return paths + + +def extract_method_name(line: str) -> str: + """Extract method name from decorator in the form of "@functional_datapipe({method_name})".""" + if "(\"" in line: + start_token, end_token = "(\"", "\")" + elif "(\'" in line: + start_token, end_token = "(\'", "\')" + else: + raise RuntimeError(f"Unable to find appropriate method name within line:\n{line}") + start, end = line.find(start_token) + len(start_token), line.find(end_token) + return line[start:end] + + +def extract_class_name(line: str) -> str: + """Extract class name from class definition in the form of "class {CLASS_NAME}({Type}):".""" + start_token = "class " + end_token = "(" + start, end = line.find(start_token) + len(start_token), line.find(end_token) + return line[start:end] + + +def parse_datapipe_file(file_path: str) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]: + """Given a path to file, parses the file and returns a dictionary of method names to function signatures.""" + method_to_signature, method_to_class_name, special_output_type = {}, {}, set() + doc_string_dict = defaultdict(list) + with open(file_path) as f: + open_paren_count = 0 + method_name, class_name, signature = "", "", "" + skip = False + for line in f: + if line.count("\"\"\"") % 2 == 1: + skip = not skip + if skip or "\"\"\"" in line: # Saving docstrings + doc_string_dict[method_name].append(line) + continue + if "@functional_datapipe" in line: + method_name = extract_method_name(line) + doc_string_dict[method_name] = [] + continue + if method_name and "class " in line: + class_name = extract_class_name(line) + continue + if method_name and ("def __init__(" in line or "def __new__(" in line): + if "def __new__(" in line: + special_output_type.add(method_name) + open_paren_count += 1 + start = line.find("(") + len("(") + line = line[start:] + if open_paren_count > 0: + open_paren_count += line.count('(') + open_paren_count -= line.count(')') + if open_paren_count == 0: + end = line.rfind(')') + signature += line[:end] + method_to_signature[method_name] = process_signature(signature) + method_to_class_name[method_name] = class_name + method_name, class_name, signature = "", "", "" + elif open_paren_count < 0: + raise RuntimeError("open parenthesis count < 0. 
This shouldn't be possible.") + else: + signature += line.strip('\n').strip(' ') + return method_to_signature, method_to_class_name, special_output_type, doc_string_dict + + +def parse_datapipe_files(file_paths: Set[str]) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]: + methods_and_signatures, methods_and_class_names, methods_with_special_output_types = {}, {}, set() + methods_and_doc_strings = {} + for path in file_paths: + ( + method_to_signature, + method_to_class_name, + methods_needing_special_output_types, + doc_string_dict, + ) = parse_datapipe_file(path) + methods_and_signatures.update(method_to_signature) + methods_and_class_names.update(method_to_class_name) + methods_with_special_output_types.update(methods_needing_special_output_types) + methods_and_doc_strings.update(doc_string_dict) + return methods_and_signatures, methods_and_class_names, methods_with_special_output_types, methods_and_doc_strings + + +def split_outside_bracket(line: str, delimiter: str = ",") -> List[str]: + """Given a line of text, split it on comma unless the comma is within a bracket '[]'.""" + bracket_count = 0 + curr_token = "" + res = [] + for char in line: + if char == "[": + bracket_count += 1 + elif char == "]": + bracket_count -= 1 + elif char == delimiter and bracket_count == 0: + res.append(curr_token) + curr_token = "" + continue + curr_token += char + res.append(curr_token) + return res + + +def process_signature(line: str) -> str: + """ + Clean up a given raw function signature. + + This includes removing the self-referential datapipe argument, default + arguments of input functions, newlines, and spaces. + """ + tokens: List[str] = split_outside_bracket(line) + for i, token in enumerate(tokens): + tokens[i] = token.strip(' ') + if token == "cls": + tokens[i] = "self" + elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"): + # Remove the datapipe after 'self' or 'cls' unless it has '*' + tokens[i] = "" + elif "Callable =" in token: # Remove default argument if it is a function + head, default_arg = token.rsplit("=", 2) + tokens[i] = head.strip(' ') + "= ..." + tokens = [t for t in tokens if t != ""] + line = ', '.join(tokens) + return line + + +def get_method_definitions(file_path: Union[str, List[str]], + files_to_exclude: Set[str], + deprecated_files: Set[str], + default_output_type: str, + method_to_special_output_type: Dict[str, str], + root: str = "") -> List[str]: + """ + #.pyi generation for functional DataPipes Process. + + # 1. Find files that we want to process (exclude the ones who don't) + # 2. Parse method name and signature + # 3. 
Remove first argument after self (unless it is "*datapipes"), default args, and spaces + """ + if root == "": + root = str(pathlib.Path(__file__).parent.resolve()) + file_path = [file_path] if isinstance(file_path, str) else file_path + file_path = [os.path.join(root, path) for path in file_path] + file_paths = find_file_paths(file_path, + files_to_exclude=files_to_exclude.union(deprecated_files)) + methods_and_signatures, methods_and_class_names, methods_w_special_output_types, methods_and_doc_strings = \ + parse_datapipe_files(file_paths) + + for fn_name in method_to_special_output_type: + if fn_name not in methods_w_special_output_types: + methods_w_special_output_types.add(fn_name) + + method_definitions = [] + for method_name, arguments in methods_and_signatures.items(): + class_name = methods_and_class_names[method_name] + if method_name in methods_w_special_output_types: + output_type = method_to_special_output_type[method_name] + else: + output_type = default_output_type + doc_string = "".join(methods_and_doc_strings[method_name]) + if doc_string == "": + doc_string = " ...\n" + method_definitions.append(f"# Functional form of '{class_name}'\n" + f"def {method_name}({arguments}) -> {output_type}:\n" + f"{doc_string}") + method_definitions.sort(key=lambda s: s.split('\n')[1]) # sorting based on method_name + + return method_definitions + + +# Defined outside of main() so they can be imported by TorchData +iterDP_file_path: str = "iter" +iterDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"} +iterDP_deprecated_files: Set[str] = set() +iterDP_method_to_special_output_type: Dict[str, str] = {"demux": "List[IterDataPipe]", "fork": "List[IterDataPipe]"} + +mapDP_file_path: str = "map" +mapDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"} +mapDP_deprecated_files: Set[str] = set() +mapDP_method_to_special_output_type: Dict[str, str] = {"shuffle": "IterDataPipe"} + + +def main() -> None: + """ + # Inject file into template datapipe.pyi.in. + + TODO: The current implementation of this script only generates interfaces for built-in methods. To generate + interface for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`. 
+ """ + iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files, + "IterDataPipe", iterDP_method_to_special_output_type) + + map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files, + "MapDataPipe", mapDP_method_to_special_output_type) + + path = pathlib.Path(__file__).parent.resolve() + replacements = [('${IterDataPipeMethods}', iter_method_definitions, 4), + ('${MapDataPipeMethods}', map_method_definitions, 4)] + gen_from_template(dir=str(path), + template_name="datapipe.pyi.in", + output_name="datapipe.pyi", + replacements=replacements) + + +if __name__ == '__main__': + main() diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py new file mode 100644 index 0000000000000000000000000000000000000000..48875e40a68d111042e21d086cf895b24b6e0474 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py @@ -0,0 +1,237 @@ +import functools +from collections import namedtuple + +from typing import Callable, Iterator, Sized, TypeVar, Optional, Union, Any, Dict, List + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data._utils.collate import default_collate +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import (_check_unpickable_fn, + validate_input_col) + +__all__ = [ + "CollatorIterDataPipe", + "MapperIterDataPipe", +] + +T_co = TypeVar("T_co", covariant=True) + + +@functional_datapipe("map") +class MapperIterDataPipe(IterDataPipe[T_co]): + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... 
return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + datapipe: IterDataPipe + fn: Callable + + def __init__( + self, + datapipe: IterDataPipe, + fn: Callable, + input_col=None, + output_col=None, + ) -> None: + super().__init__() + self.datapipe = datapipe + + _check_unpickable_fn(fn) + self.fn = fn # type: ignore[assignment] + + self.input_col = input_col + if input_col is None and output_col is not None: + raise ValueError("`output_col` must be None when `input_col` is None.") + if isinstance(output_col, (list, tuple)): + if len(output_col) > 1: + raise ValueError("`output_col` must be a single-element list or tuple") + output_col = output_col[0] + self.output_col = output_col + validate_input_col(fn, input_col) + + def _apply_fn(self, data): + if self.input_col is None and self.output_col is None: + return self.fn(data) + + if self.input_col is None: + res = self.fn(data) + elif isinstance(self.input_col, (list, tuple)): + args = tuple(data[col] for col in self.input_col) + res = self.fn(*args) + else: + res = self.fn(data[self.input_col]) + + # Copy tuple to list and run in-place modification because tuple is immutable. + if isinstance(data, tuple): + t_flag = True + data = list(data) + else: + t_flag = False + + if self.output_col is None: + if isinstance(self.input_col, (list, tuple)): + data[self.input_col[0]] = res + for idx in sorted(self.input_col[1:], reverse=True): + del data[idx] + else: + data[self.input_col] = res + else: + if self.output_col == -1: + data.append(res) + else: + data[self.output_col] = res + + # Convert list back to tuple + return tuple(data) if t_flag else data + + def __iter__(self) -> Iterator[T_co]: + for data in self.datapipe: + yield self._apply_fn(data) + + def __len__(self) -> int: + if isinstance(self.datapipe, Sized): + return len(self.datapipe) + raise TypeError( + f"{type(self).__name__} instance doesn't have valid length" + ) + + +def _collate_helper(conversion, item): + # TODO(VitalyFedyunin): Verify that item is any sort of batch + if len(item.items) > 1: + # TODO(VitalyFedyunin): Compact all batch dataframes into one + raise Exception("Only supports one DataFrame per batch") + df = item[0] + columns_name = df_wrapper.get_columns(df) + tuple_names: List = [] + tuple_values: List = [] + + for name in conversion.keys(): + if name not in columns_name: + raise Exception("Conversion keys missmatch") + + for name in columns_name: + if name in conversion: + if not callable(conversion[name]): + raise Exception('Collate (DF)DataPipe requires callable as dict values') + collation_fn = conversion[name] + else: + # TODO(VitalyFedyunin): Add default collation into df_wrapper + try: + import torcharrow.pytorch as tap # type: ignore[import] + collation_fn = tap.rec.Default() + except Exception as e: + raise Exception("unable to import default collation function from the TorchArrow") from e + + tuple_names.append(str(name)) + value = collation_fn(df[name]) + tuple_values.append(value) + + # TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here + # TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is 
not empty + tpl_cls = namedtuple("CollateResult", tuple_names) # type: ignore[misc] + tuple = tpl_cls(*tuple_values) + return tuple + + +@functional_datapipe("collate") +class CollatorIterDataPipe(MapperIterDataPipe): + r""" + Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``). + + By default, it uses :func:`torch.utils.data.default_collate`. + + .. note:: + While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the + default behavior and `functools.partial` to specify any additional arguments. + + Args: + datapipe: Iterable DataPipe being collated + collate_fn: Customized collate function to collect and combine data or a batch of data. + Default function collates to Tensor(s) based on data type. + + Example: + >>> # xdoctest: +SKIP + >>> # Convert integer data to float Tensor + >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): + ... def __init__(self, start, end): + ... super(MyIterDataPipe).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + ... def __len__(self): + ... return self.end - self.start + ... + >>> ds = MyIterDataPipe(start=3, end=7) + >>> print(list(ds)) + [3, 4, 5, 6] + >>> def collate_fn(batch): + ... return torch.tensor(batch, dtype=torch.float) + ... + >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) + >>> print(list(collated_ds)) + [tensor(3.), tensor(4.), tensor(5.), tensor(6.)] + """ + + def __init__( + self, + datapipe: IterDataPipe, + conversion: Optional[ + Union[ + Callable[..., Any], + Dict[Union[str, Any], Union[Callable, Any]], + ] + ] = default_collate, + collate_fn: Optional[Callable] = None, + ) -> None: + # TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]` + # TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]` + if collate_fn is not None: + super().__init__(datapipe, fn=collate_fn) + else: + if callable(conversion): + super().__init__(datapipe, fn=conversion) + else: + # TODO(VitalyFedyunin): Validate passed dictionary + collate_fn = functools.partial(_collate_helper, conversion) + super().__init__(datapipe, fn=collate_fn) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py new file mode 100644 index 0000000000000000000000000000000000000000..67e9797fe3356f9d0756de492eee3ca618f43fd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py @@ -0,0 +1,71 @@ +from io import IOBase +from typing import Iterable, Tuple, Optional + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames + +__all__ = [ + "FileOpenerIterDataPipe", +] + + +@functional_datapipe("open_files") +class FileOpenerIterDataPipe(IterDataPipe[Tuple[str, IOBase]]): + r""" + Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``). + + Args: + datapipe: Iterable datapipe that provides pathnames + mode: An optional string that specifies the mode in which + the file is opened by ``open()``. 
It defaults to ``r``, other options are + ``b`` for reading in binary mode and ``t`` for text mode. + encoding: An optional string that specifies the encoding of the + underlying file. It defaults to ``None`` to match the default encoding of ``open``. + length: Nominal length of the datapipe + + Note: + The opened file handles will be closed by Python's GC periodically. Users can choose + to close them explicitly. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader + >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt')) + >>> dp = FileOpener(dp) + >>> dp = StreamReader(dp) + >>> list(dp) + [('./abc.txt', 'abc')] + """ + + def __init__( + self, + datapipe: Iterable[str], + mode: str = 'r', + encoding: Optional[str] = None, + length: int = -1): + super().__init__() + self.datapipe: Iterable = datapipe + self.mode: str = mode + self.encoding: Optional[str] = encoding + + if self.mode not in ('b', 't', 'rb', 'rt', 'r'): + raise ValueError(f"Invalid mode {mode}") + # TODO: enforce typing for each instance based on mode, otherwise + # `argument_validation` with this DataPipe may be potentially broken + + if 'b' in mode and encoding is not None: + raise ValueError("binary mode doesn't take an encoding argument") + + self.length: int = length + + # Remove annotation due to 'IOBase' is a general type and true type + # is determined at runtime based on mode. Some `DataPipe` requiring + # a subtype would cause mypy error. + def __iter__(self): + yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding) + + def __len__(self): + if self.length == -1: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + return self.length diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py new file mode 100644 index 0000000000000000000000000000000000000000..fee74582e61bd613a60bf5eac7c7f5c3f60ca91f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py @@ -0,0 +1,96 @@ +from typing import Callable, Iterator, Tuple, TypeVar + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper +from torch.utils.data.datapipes.utils.common import ( + _check_unpickable_fn, + StreamWrapper, + validate_input_col +) + + +__all__ = ["FilterIterDataPipe", ] + +T = TypeVar('T') +T_co = TypeVar('T_co', covariant=True) + + +@functional_datapipe('filter') +class FilterIterDataPipe(IterDataPipe[T_co]): + r""" + Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). + + Args: + datapipe: Iterable DataPipe being filtered + filter_fn: Customized function mapping an element to a boolean. + input_col: Index or indices of data which ``filter_fn`` is applied, such as: + + - ``None`` as default to apply ``filter_fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def is_even(n): + ... 
return n % 2 == 0 + >>> dp = IterableWrapper(range(5)) + >>> filter_dp = dp.filter(filter_fn=is_even) + >>> list(filter_dp) + [0, 2, 4] + """ + + datapipe: IterDataPipe[T_co] + filter_fn: Callable + + def __init__( + self, + datapipe: IterDataPipe[T_co], + filter_fn: Callable, + input_col=None, + ) -> None: + super().__init__() + self.datapipe = datapipe + + _check_unpickable_fn(filter_fn) + self.filter_fn = filter_fn # type: ignore[assignment] + + self.input_col = input_col + validate_input_col(filter_fn, input_col) + + def _apply_filter_fn(self, data) -> bool: + if self.input_col is None: + return self.filter_fn(data) + elif isinstance(self.input_col, (list, tuple)): + args = tuple(data[col] for col in self.input_col) + return self.filter_fn(*args) + else: + return self.filter_fn(data[self.input_col]) + + def __iter__(self) -> Iterator[T_co]: + for data in self.datapipe: + condition, filtered = self._returnIfTrue(data) + if condition: + yield filtered + else: + StreamWrapper.close_streams(data) + + def _returnIfTrue(self, data: T) -> Tuple[bool, T]: + condition = self._apply_filter_fn(data) + + if df_wrapper.is_column(condition): + # We are operating on DataFrames filter here + result = [] + for idx, mask in enumerate(df_wrapper.iterate(condition)): + if mask: + result.append(df_wrapper.get_item(data, idx)) + if len(result): + return True, df_wrapper.concat(result) + else: + return False, None # type: ignore[return-value] + + if not isinstance(condition, bool): + raise ValueError("Boolean output is required for `filter_fn` of FilterIterDataPipe, got", type(condition)) + + return condition, data diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3794f7f0e77834bf5da0d21be8a2d00285eb07ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py @@ -0,0 +1,51 @@ +import copy +import warnings +from torch.utils.data.datapipes.datapipe import IterDataPipe + +__all__ = ["IterableWrapperIterDataPipe", ] + + +class IterableWrapperIterDataPipe(IterDataPipe): + r""" + Wraps an iterable object to create an IterDataPipe. + + Args: + iterable: Iterable object to be wrapped into an IterDataPipe + deepcopy: Option to deepcopy input iterable object for each + iterator. The copy is made when the first element is read in ``iter()``. + + .. note:: + If ``deepcopy`` is explicitly set to ``False``, users should ensure + that the data pipeline doesn't contain any in-place operations over + the iterable instance to prevent data inconsistency across iterations. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> list(dp) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + """ + + def __init__(self, iterable, deepcopy=True): + self.iterable = iterable + self.deepcopy = deepcopy + + def __iter__(self): + source_data = self.iterable + if self.deepcopy: + try: + source_data = copy.deepcopy(self.iterable) + # For the case that data cannot be deep-copied, + # all in-place operations will affect iterable variable. + # When this DataPipe is iterated second time, it will + # yield modified items. + except TypeError: + warnings.warn( + "The input iterable can not be deepcopied, " + "please be aware of in-place modification would affect source data." 
+ ) + yield from source_data + + def __len__(self): + return len(self.iterable) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dee04d15cc7b4f501a0b263a4be598954f3016f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py @@ -0,0 +1,17 @@ +# Functional DataPipe +from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper +from torch.utils.data.datapipes.map.combinatorics import ShufflerIterDataPipe as Shuffler +from torch.utils.data.datapipes.map.combining import ( + ConcaterMapDataPipe as Concater, + ZipperMapDataPipe as Zipper +) +from torch.utils.data.datapipes.map.grouping import ( + BatcherMapDataPipe as Batcher +) +from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe as SequenceWrapper + + +__all__ = ['Batcher', 'Concater', 'Mapper', 'SequenceWrapper', 'Shuffler', 'Zipper'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e54998870cd90d36a3005e3f2184342df9e79789 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716f3f264f7a4dd01b3121242d3cdff2c4f44ac6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e119d547e1133ef6f3d16409931fa3913c4345b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c5a78788e106737f83f0ca8f15474b53532dff9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a37e733e73d2a1eb856f85637820fc0bc4865e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8adf44ec71c66f8499d45183c99fbe36f822e657 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py new file mode 100644 index 0000000000000000000000000000000000000000..c9202bb1eefbb36373a2c805036687926bc97dec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py @@ -0,0 +1,61 @@ +from torch.utils.data.datapipes.utils.common import _check_unpickable_fn +from typing import Callable, TypeVar +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import MapDataPipe + +__all__ = ["MapperMapDataPipe", "default_fn"] + +T_co = TypeVar('T_co', covariant=True) + + +# Default function to return each item directly +# In order to keep datapipe picklable, eliminates the usage +# of python lambda function +def default_fn(data): + return data + + +@functional_datapipe('map') +class MapperMapDataPipe(MapDataPipe[T_co]): + r""" + Apply the input function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source MapDataPipe + fn: Function being applied to each item + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + datapipe: MapDataPipe + fn: Callable + + def __init__( + self, + datapipe: MapDataPipe, + fn: Callable = default_fn, + ) -> None: + super().__init__() + self.datapipe = datapipe + _check_unpickable_fn(fn) + self.fn = fn # type: ignore[assignment] + + def __len__(self) -> int: + return len(self.datapipe) + + def __getitem__(self, index) -> T_co: + return self.fn(self.datapipe[index]) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py new file mode 100644 index 0000000000000000000000000000000000000000..c21d532d4925d59296d2f111c55a6755b4ae9101 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py @@ -0,0 +1,126 @@ +import random + +import torch +from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe +from typing import Iterator, List, Optional, TypeVar + +__all__ = ["ShufflerIterDataPipe", ] + + +T_co = TypeVar('T_co', covariant=True) + + +# @functional_datapipe('shuffle') +class ShufflerIterDataPipe(IterDataPipe[T_co]): + r""" + Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``). + + When it is used with :class:`~torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. 
+ + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed + for each worker process. + + Args: + datapipe: MapDataPipe being shuffled + indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> shuffle_dp = dp.shuffle().set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + >>> list(shuffle_dp) + [6, 1, 9, 5, 2, 4, 7, 3, 8, 0] + >>> # Reset seed for Shuffler + >>> shuffle_dp = shuffle_dp.set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + + Note: + Even thought this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it would return an + ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be non-sensitive to + the order of data order for the sake of random reads, but ``IterDataPipe`` depends on the order + of data during data-processing. + """ + + datapipe: MapDataPipe[T_co] + _enabled: bool + _seed: Optional[int] + _rng: random.Random + + def __init__(self, + datapipe: MapDataPipe[T_co], + *, + indices: Optional[List] = None, + ) -> None: + super().__init__() + self.datapipe = datapipe + self.indices = list(range(len(datapipe))) if indices is None else indices + self._enabled = True + self._seed = None + self._rng = random.Random() + self._shuffled_indices: List = self.indices + + def set_shuffle(self, shuffle=True): + self._enabled = shuffle + return self + + def set_seed(self, seed: int): + self._seed = seed + return self + + def __iter__(self) -> Iterator[T_co]: + if not self._enabled: + for idx in self.indices: + yield self.datapipe[idx] + else: + while self._shuffled_indices: + idx = self._shuffled_indices.pop() + yield self.datapipe[idx] + + def reset(self) -> None: + if self._enabled and self._seed is None: + self._seed = int(torch.empty((), dtype=torch.int64).random_().item()) + self._rng.seed(self._seed) + self._seed = None + self._shuffled_indices = self._rng.sample(self.indices, len(self.indices)) + + def __len__(self) -> int: + return len(self.datapipe) + + def __getstate__(self): + state = ( + self.datapipe, + self.indices, + self._enabled, + self._seed, + self._rng.getstate(), + self._shuffled_indices, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.datapipe, + self.indices, + self._enabled, + self._seed, + rng_state, + self._shuffled_indices, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self._rng = random.Random() + self._rng.setstate(rng_state) + + +MapDataPipe.register_datapipe_as_function("shuffle", ShufflerIterDataPipe) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py new file mode 100644 index 0000000000000000000000000000000000000000..809b44dc96cd8f0a8e7d3bf8795f76b512cb244f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py @@ -0,0 +1,99 @@ +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import MapDataPipe 
+from typing import Sized, Tuple, TypeVar
+
+__all__ = ["ConcaterMapDataPipe", "ZipperMapDataPipe"]
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+@functional_datapipe('concat')
+class ConcaterMapDataPipe(MapDataPipe):
+    r"""
+    Concatenate multiple Map DataPipes (functional name: ``concat``).
+
+    The new index range is the cumulative sum of the lengths of the source DataPipes.
+    For example, if there are 2 source DataPipes both with length 5,
+    index 0 to 4 of the resulting `ConcaterMapDataPipe` would refer to
+    elements of the first DataPipe, and 5 to 9 would refer to elements
+    of the second DataPipe.
+
+    Args:
+        datapipes: Map DataPipes being concatenated
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp1 = SequenceWrapper(range(3))
+        >>> dp2 = SequenceWrapper(range(3))
+        >>> concat_dp = dp1.concat(dp2)
+        >>> list(concat_dp)
+        [0, 1, 2, 0, 1, 2]
+    """
+
+    datapipes: Tuple[MapDataPipe]
+
+    def __init__(self, *datapipes: MapDataPipe):
+        if len(datapipes) == 0:
+            raise ValueError("Expected at least one DataPipe, but got nothing")
+        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `MapDataPipe`")
+        if not all(isinstance(dp, Sized) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `Sized`")
+        self.datapipes = datapipes  # type: ignore[assignment]
+
+    def __getitem__(self, index) -> T_co:  # type: ignore[type-var]
+        offset = 0
+        for dp in self.datapipes:
+            if index - offset < len(dp):
+                return dp[index - offset]
+            else:
+                offset += len(dp)
+        raise IndexError(f"Index {index} is out of range.")
+
+    def __len__(self) -> int:
+        return sum(len(dp) for dp in self.datapipes)
+
+
+@functional_datapipe('zip')
+class ZipperMapDataPipe(MapDataPipe[Tuple[T_co, ...]]):
+    r"""
+    Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
+
+    This MapDataPipe is out of bounds as soon as the shortest input DataPipe is exhausted.
+
+    Args:
+        *datapipes: Map DataPipes being aggregated
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp1 = SequenceWrapper(range(3))
+        >>> dp2 = SequenceWrapper(range(10, 13))
+        >>> zip_dp = dp1.zip(dp2)
+        >>> list(zip_dp)
+        [(0, 10), (1, 11), (2, 12)]
+    """
+
+    datapipes: Tuple[MapDataPipe[T_co], ...]
+ + def __init__(self, *datapipes: MapDataPipe[T_co]) -> None: + if len(datapipes) == 0: + raise ValueError("Expected at least one DataPipe, but got nothing") + if not all(isinstance(dp, MapDataPipe) for dp in datapipes): + raise TypeError("Expected all inputs to be `MapDataPipe`") + if not all(isinstance(dp, Sized) for dp in datapipes): + raise TypeError("Expected all inputs to be `Sized`") + self.datapipes = datapipes + + def __getitem__(self, index) -> Tuple[T_co, ...]: + res = [] + for dp in self.datapipes: + try: + res.append(dp[index]) + except IndexError as e: + raise IndexError(f"Index {index} is out of range for one of the input MapDataPipes {dp}.") from e + return tuple(res) + + def __len__(self) -> int: + return min(len(dp) for dp in self.datapipes) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py new file mode 100644 index 0000000000000000000000000000000000000000..a94cc7b5679e9107818f4ec73ae11497b002a7af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py @@ -0,0 +1,69 @@ +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import MapDataPipe, DataChunk +from typing import List, Sized, TypeVar + +__all__ = ["BatcherMapDataPipe", ] + +T = TypeVar('T') + + +@functional_datapipe('batch') +class BatcherMapDataPipe(MapDataPipe[DataChunk]): + r""" + Create mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, + or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``. + + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> batch_dp = dp.batch(batch_size=2) + >>> list(batch_dp) + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] + """ + + datapipe: MapDataPipe + batch_size: int + drop_last: bool + + def __init__(self, + datapipe: MapDataPipe[T], + batch_size: int, + drop_last: bool = False, + wrapper_class=DataChunk, + ) -> None: + assert batch_size > 0, "Batch size is required to be larger than 0!" 
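+        # Note: batches are built lazily in __getitem__ below; batch `index` gathers
+        # source items index * batch_size .. (index + 1) * batch_size - 1, and a
+        # shorter final batch is kept unless drop_last is set.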
+ super().__init__() + self.datapipe = datapipe + self.batch_size = batch_size + self.drop_last = drop_last + self.wrapper_class = wrapper_class + + def __getitem__(self, index) -> DataChunk: + batch: List = [] + indices = range(index * self.batch_size, (index + 1) * self.batch_size) + try: + for i in indices: + batch.append(self.datapipe[i]) + return self.wrapper_class(batch) + except IndexError as e: + if not self.drop_last and len(batch) > 0: + return self.wrapper_class(batch) + else: + raise IndexError(f"Index {index} is out of bound.") from e + + def __len__(self) -> int: + if isinstance(self.datapipe, Sized): + if self.drop_last: + return len(self.datapipe) // self.batch_size + else: + return (len(self.datapipe) + self.batch_size - 1) // self.batch_size + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..18d4fd18a1936fc89ab881b1cb7a7c826be0f2d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py @@ -0,0 +1,50 @@ +import copy +import warnings +from torch.utils.data.datapipes.datapipe import MapDataPipe + +__all__ = ["SequenceWrapperMapDataPipe", ] + + +class SequenceWrapperMapDataPipe(MapDataPipe): + r""" + Wraps a sequence object into a MapDataPipe. + + Args: + sequence: Sequence object to be wrapped into an MapDataPipe + deepcopy: Option to deepcopy input sequence object + + .. note:: + If ``deepcopy`` is set to False explicitly, users should ensure + that data pipeline doesn't contain any in-place operations over + the iterable instance, in order to prevent data inconsistency + across iterations. 
+ + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> list(dp) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400}) + >>> dp['a'] + 100 + """ + + def __init__(self, sequence, deepcopy=True): + if deepcopy: + try: + self.sequence = copy.deepcopy(sequence) + except TypeError: + warnings.warn( + "The input sequence can not be deepcopied, " + "please be aware of in-place modification would affect source data" + ) + self.sequence = sequence + else: + self.sequence = sequence + + def __getitem__(self, index): + return self.sequence[index] + + def __len__(self): + return len(self.sequence) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3f0c83e02d30fd41d15f7bc39f1d9ab394e9c0d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b047f5d2bd5ff6b045b454f9e54dc86952b9e59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ee381241fd9f7de9500177878e0869826787f99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca16fa6b2e5a4598da8559cf790545f3bb75db1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..faf45c078890c494dcf26e5bb340bcff54123f05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py @@ -0,0 +1,379 @@ +import fnmatch +import functools +import inspect +import os +import warnings + +from io import IOBase + +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union +from torch.utils._import_utils import dill_available 
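Both wrapper DataPipes above (``IterableWrapperIterDataPipe`` and ``SequenceWrapperMapDataPipe``) carry the same ``deepcopy`` caveat. The editor-added snippet below illustrates why the default matters, assuming the import path added by this diff:

from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe

data = [[0], [1], [2]]

dp_copied = SequenceWrapperMapDataPipe(data)                   # deepcopy=True (default): snapshots the input
dp_shared = SequenceWrapperMapDataPipe(data, deepcopy=False)   # shares the caller's list

data[0].append(99)  # in-place modification of the source after wrapping

print(dp_copied[0])  # [0]      -- unaffected, reads its own deep copy
print(dp_shared[0])  # [0, 99]  -- sees the mutation, so results can drift across iterations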
+ +__all__ = [ + "validate_input_col", + "StreamWrapper", + "get_file_binaries_from_pathnames", + "get_file_pathnames_from_root", + "match_masks", + "validate_pathname_binary_tuple", +] + + +def validate_input_col(fn: Callable, input_col: Optional[Union[int, tuple, list]]): + """ + Check that function used in a callable datapipe works with the input column. + + This simply ensures that the number of positional arguments matches the size + of the input column. The function must not contain any non-default + keyword-only arguments. + + Examples: + >>> # xdoctest: +SKIP("Failing on some CI machines") + >>> def f(a, b, *, c=1): + >>> return a + b + c + >>> def f_def(a, b=1, *, c=1): + >>> return a + b + c + >>> assert validate_input_col(f, [1, 2]) + >>> assert validate_input_col(f_def, 1) + >>> assert validate_input_col(f_def, [1, 2]) + + Notes: + If the function contains variable positional (`inspect.VAR_POSITIONAL`) arguments, + for example, f(a, *args), the validator will accept any size of input column + greater than or equal to the number of positional arguments. + (in this case, 1). + + Args: + fn: The function to check. + input_col: The input column to check. + + Raises: + ValueError: If the function is not compatible with the input column. + """ + try: + sig = inspect.signature(fn) + except ValueError: # Signature cannot be inspected, likely it is a built-in fn or written in C + return + if isinstance(input_col, (list, tuple)): + input_col_size = len(input_col) + else: + input_col_size = 1 + + pos = [] + var_positional = False + non_default_kw_only = [] + + for p in sig.parameters.values(): + if p.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD): + pos.append(p) + elif p.kind is inspect.Parameter.VAR_POSITIONAL: + var_positional = True + elif p.kind is inspect.Parameter.KEYWORD_ONLY: + if p.default is p.empty: + non_default_kw_only.append(p) + else: + continue + + if isinstance(fn, functools.partial): + fn_name = getattr(fn.func, "__name__", repr(fn.func)) + else: + fn_name = getattr(fn, "__name__", repr(fn)) + + if len(non_default_kw_only) > 0: + raise ValueError( + f"The function {fn_name} takes {len(non_default_kw_only)} " + f"non-default keyword-only parameters, which is not allowed." + ) + + if len(sig.parameters) < input_col_size: + if not var_positional: + raise ValueError( + f"The function {fn_name} takes {len(sig.parameters)} " + f"parameters, but {input_col_size} are required." + ) + else: + if len(pos) > input_col_size: + if any(p.default is p.empty for p in pos[input_col_size:]): + raise ValueError( + f"The function {fn_name} takes {len(pos)} " + f"positional parameters, but {input_col_size} are required." + ) + elif len(pos) < input_col_size: + if not var_positional: + raise ValueError( + f"The function {fn_name} takes {len(pos)} " + f"positional parameters, but {input_col_size} are required." + ) + + +def _is_local_fn(fn): + # Functions or Methods + if hasattr(fn, "__code__"): + return fn.__code__.co_flags & inspect.CO_NESTED + # Callable Objects + else: + if hasattr(fn, "__qualname__"): + return "" in fn.__qualname__ + fn_type = type(fn) + if hasattr(fn_type, "__qualname__"): + return "" in fn_type.__qualname__ + return False + + +def _check_unpickable_fn(fn: Callable): + """ + Check function is pickable or not. + + If it is a lambda or local function, a UserWarning will be raised. If it's not a callable function, a TypeError will be raised. 
+ """ + if not callable(fn): + raise TypeError(f"A callable function is expected, but {type(fn)} is provided.") + + # Extract function from partial object + # Nested partial function is automatically expanded as a single partial object + if isinstance(fn, partial): + fn = fn.func + + # Local function + if _is_local_fn(fn) and not dill_available(): + warnings.warn( + "Local function is not supported by pickle, please use " + "regular python function or functools.partial instead." + ) + return + + # Lambda function + if hasattr(fn, "__name__") and fn.__name__ == "" and not dill_available(): + warnings.warn( + "Lambda function is not supported by pickle, please use " + "regular python function or functools.partial instead." + ) + return + + +def match_masks(name : str, masks : Union[str, List[str]]) -> bool: + # empty mask matches any input name + if not masks: + return True + + if isinstance(masks, str): + return fnmatch.fnmatch(name, masks) + + for mask in masks: + if fnmatch.fnmatch(name, mask): + return True + return False + + +def get_file_pathnames_from_root( + root: str, + masks: Union[str, List[str]], + recursive: bool = False, + abspath: bool = False, + non_deterministic: bool = False) -> Iterable[str]: + + # print out an error message and raise the error out + def onerror(err : OSError): + warnings.warn(err.filename + " : " + err.strerror) + raise err + + if os.path.isfile(root): + path = root + if abspath: + path = os.path.abspath(path) + fname = os.path.basename(path) + if match_masks(fname, masks): + yield path + else: + for path, dirs, files in os.walk(root, onerror=onerror): + if abspath: + path = os.path.abspath(path) + if not non_deterministic: + files.sort() + for f in files: + if match_masks(f, masks): + yield os.path.join(path, f) + if not recursive: + break + if not non_deterministic: + # Note that this is in-place modifying the internal list from `os.walk` + # This only works because `os.walk` doesn't shallow copy before turn + # https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/os.py#L407 + dirs.sort() + + +def get_file_binaries_from_pathnames(pathnames: Iterable, mode: str, encoding: Optional[str] = None): + if not isinstance(pathnames, Iterable): + pathnames = [pathnames, ] + + if mode in ('b', 't'): + mode = 'r' + mode + + for pathname in pathnames: + if not isinstance(pathname, str): + raise TypeError(f"Expected string type for pathname, but got {type(pathname)}") + yield pathname, StreamWrapper(open(pathname, mode, encoding=encoding)) + + +def validate_pathname_binary_tuple(data: Tuple[str, IOBase]): + if not isinstance(data, tuple): + raise TypeError(f"pathname binary data should be tuple type, but it is type {type(data)}") + if len(data) != 2: + raise TypeError(f"pathname binary stream tuple length should be 2, but got {len(data)}") + if not isinstance(data[0], str): + raise TypeError(f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}") + if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper): + raise TypeError( + f"binary stream within the tuple should have IOBase or" + f"its subclasses as type, but it is type {type(data[1])}" + ) + + +# Deprecated function names and its corresponding DataPipe type and kwargs for the `_deprecation_warning` function +_iter_deprecated_functional_names: Dict[str, Dict] = {} +_map_deprecated_functional_names: Dict[str, Dict] = {} + + +def _deprecation_warning( + old_class_name: str, + *, + deprecation_version: str, + removal_version: 
str, + old_functional_name: str = "", + old_argument_name: str = "", + new_class_name: str = "", + new_functional_name: str = "", + new_argument_name: str = "", + deprecate_functional_name_only: bool = False, +) -> None: + if new_functional_name and not old_functional_name: + raise ValueError("Old functional API needs to be specified for the deprecation warning.") + if new_argument_name and not old_argument_name: + raise ValueError("Old argument name needs to be specified for the deprecation warning.") + + if old_functional_name and old_argument_name: + raise ValueError("Deprecating warning for functional API and argument should be separated.") + + msg = f"`{old_class_name}()`" + if deprecate_functional_name_only and old_functional_name: + msg = f"{msg}'s functional API `.{old_functional_name}()` is" + elif old_functional_name: + msg = f"{msg} and its functional API `.{old_functional_name}()` are" + elif old_argument_name: + msg = f"The argument `{old_argument_name}` of {msg} is" + else: + msg = f"{msg} is" + msg = ( + f"{msg} deprecated since {deprecation_version} and will be removed in {removal_version}." + f"\nSee https://github.com/pytorch/data/issues/163 for details." + ) + + if new_class_name or new_functional_name: + msg = f"{msg}\nPlease use" + if new_class_name: + msg = f"{msg} `{new_class_name}()`" + if new_class_name and new_functional_name: + msg = f"{msg} or" + if new_functional_name: + msg = f"{msg} `.{new_functional_name}()`" + msg = f"{msg} instead." + + if new_argument_name: + msg = f"{msg}\nPlease use `{old_class_name}({new_argument_name}=)` instead." + + warnings.warn(msg, FutureWarning) + + +class StreamWrapper: + """ + StreamWrapper is introduced to wrap file handler generated by DataPipe operation like `FileOpener`. + + StreamWrapper would guarantee the wrapped file handler is closed when it's out of scope. 
+ """ + + session_streams: Dict[Any, int] = {} + debug_unclosed_streams: bool = False + + def __init__(self, file_obj, parent_stream=None, name=None): + self.file_obj = file_obj + self.child_counter = 0 + self.parent_stream = parent_stream + self.close_on_last_child = False + self.name = name + self.closed = False + if parent_stream is not None: + if not isinstance(parent_stream, StreamWrapper): + raise RuntimeError(f'Parent stream should be StreamWrapper, {type(parent_stream)} was given') + parent_stream.child_counter += 1 + self.parent_stream = parent_stream + if StreamWrapper.debug_unclosed_streams: + StreamWrapper.session_streams[self] = 1 + + @classmethod + def close_streams(cls, v, depth=0): + """Traverse structure and attempts to close all found StreamWrappers on best effort basis.""" + if depth > 10: + return + if isinstance(v, StreamWrapper): + v.close() + else: + # Traverse only simple structures + if isinstance(v, dict): + for vv in v.values(): + cls.close_streams(vv, depth=depth + 1) + elif isinstance(v, (list, tuple)): + for vv in v: + cls.close_streams(vv, depth=depth + 1) + + def __getattr__(self, name): + file_obj = self.__dict__['file_obj'] + return getattr(file_obj, name) + + def close(self, *args, **kwargs): + if self.closed: + return + if StreamWrapper.debug_unclosed_streams: + del StreamWrapper.session_streams[self] + if hasattr(self, "parent_stream") and self.parent_stream is not None: + self.parent_stream.child_counter -= 1 + if not self.parent_stream.child_counter and self.parent_stream.close_on_last_child: + self.parent_stream.close() + try: + self.file_obj.close(*args, **kwargs) + except AttributeError: + pass + self.closed = True + + def autoclose(self): + """Automatically close stream when all child streams are closed or if there are none.""" + self.close_on_last_child = True + if self.child_counter == 0: + self.close() + + def __dir__(self): + attrs = list(self.__dict__.keys()) + list(StreamWrapper.__dict__.keys()) + attrs += dir(self.file_obj) + return list(set(attrs)) + + def __del__(self): + if not self.closed: + self.close() + + def __iter__(self): + yield from self.file_obj + + def __next__(self): + return next(self.file_obj) + + def __repr__(self): + if self.name is None: + return f"StreamWrapper<{self.file_obj!r}>" + else: + return f"StreamWrapper<{self.name},{self.file_obj!r}>" + + def __getstate__(self): + return self.file_obj + + def __setstate__(self, obj): + self.file_obj = obj diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..0211a8fe4ba462a768d41e95f2a00c4084aec7df --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py @@ -0,0 +1,330 @@ +# This file takes partial of the implementation from NVIDIA's webdataset at here: +# https://github.com/tmbdev/webdataset/blob/master/webdataset/autodecode.py + +import io +import json +import os.path +import pickle +import tempfile + +import torch +from torch.utils.data.datapipes.utils.common import StreamWrapper + + +__all__ = [ + "Decoder", + "ImageHandler", + "MatHandler", + "audiohandler", + "basichandlers", + "extension_extract_fn", + "handle_extension", + "imagehandler", + "mathandler", + "videohandler", +] + + +################################################################ +# handle basic datatypes +################################################################ +def 
basichandlers(extension, data): + + if extension in "txt text transcript": + return data.decode("utf-8") + + if extension in "cls cls2 class count index inx id".split(): + try: + return int(data) + except ValueError: + return None + + if extension in "json jsn": + return json.loads(data) + + if extension in "pyd pickle".split(): + return pickle.loads(data) + + if extension in "pt".split(): + stream = io.BytesIO(data) + return torch.load(stream) + + # if extension in "ten tb".split(): + # from . import tenbin + # return tenbin.decode_buffer(data) + + # if extension in "mp msgpack msg".split(): + # import msgpack + # return msgpack.unpackb(data) + + return None + + +################################################################ +# handle images +################################################################ +imagespecs = { + "l8": ("numpy", "uint8", "l"), + "rgb8": ("numpy", "uint8", "rgb"), + "rgba8": ("numpy", "uint8", "rgba"), + "l": ("numpy", "float", "l"), + "rgb": ("numpy", "float", "rgb"), + "rgba": ("numpy", "float", "rgba"), + "torchl8": ("torch", "uint8", "l"), + "torchrgb8": ("torch", "uint8", "rgb"), + "torchrgba8": ("torch", "uint8", "rgba"), + "torchl": ("torch", "float", "l"), + "torchrgb": ("torch", "float", "rgb"), + "torch": ("torch", "float", "rgb"), + "torchrgba": ("torch", "float", "rgba"), + "pill": ("pil", None, "l"), + "pil": ("pil", None, "rgb"), + "pilrgb": ("pil", None, "rgb"), + "pilrgba": ("pil", None, "rgba"), +} + +def handle_extension(extensions, f): + """ + Return a decoder handler function for the list of extensions. + + Extensions can be a space separated list of extensions. + Extensions can contain dots, in which case the corresponding number + of extension components must be present in the key given to f. + Comparisons are case insensitive. + Examples: + handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg + handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg + """ + extensions = extensions.lower().split() + + def g(key, data): + extension = key.lower().split(".") + + for target in extensions: + target = target.split(".") + if len(target) > len(extension): + continue + + if extension[-len(target):] == target: + return f(data) + return None + return g + + +class ImageHandler: + """ + Decode image data using the given `imagespec`. + + The `imagespec` specifies whether the image is decoded + to numpy/torch/pi, decoded to uint8/float, and decoded + to l/rgb/rgba: + + - l8: numpy uint8 l + - rgb8: numpy uint8 rgb + - rgba8: numpy uint8 rgba + - l: numpy float l + - rgb: numpy float rgb + - rgba: numpy float rgba + - torchl8: torch uint8 l + - torchrgb8: torch uint8 rgb + - torchrgba8: torch uint8 rgba + - torchl: torch float l + - torchrgb: torch float rgb + - torch: torch float rgb + - torchrgba: torch float rgba + - pill: pil None l + - pil: pil None rgb + - pilrgb: pil None rgb + - pilrgba: pil None rgba + """ + + def __init__(self, imagespec): + assert imagespec in list(imagespecs.keys()), f"unknown image specification: {imagespec}" + self.imagespec = imagespec.lower() + + def __call__(self, extension, data): + if extension.lower() not in "jpg jpeg png ppm pgm pbm pnm".split(): + return None + + try: + import numpy as np + except ImportError as e: + raise ModuleNotFoundError("Package `numpy` is required to be installed for default image decoder." 
+ "Please use `pip install numpy` to install the package") from e + + try: + import PIL.Image + except ImportError as e: + raise ModuleNotFoundError("Package `PIL` is required to be installed for default image decoder." + "Please use `pip install Pillow` to install the package") from e + + imagespec = self.imagespec + atype, etype, mode = imagespecs[imagespec] + + with io.BytesIO(data) as stream: + img = PIL.Image.open(stream) + img.load() + img = img.convert(mode.upper()) + if atype == "pil": + return img + elif atype == "numpy": + result = np.asarray(img) + assert result.dtype == np.uint8, f"numpy image array should be type uint8, but got {result.dtype}" + if etype == "uint8": + return result + else: + return result.astype("f") / 255.0 + elif atype == "torch": + result = np.asarray(img) + assert result.dtype == np.uint8, f"numpy image array should be type uint8, but got {result.dtype}" + + if etype == "uint8": + result = np.array(result.transpose(2, 0, 1)) + return torch.tensor(result) + else: + result = np.array(result.transpose(2, 0, 1)) + return torch.tensor(result) / 255.0 + return None + +def imagehandler(imagespec): + return ImageHandler(imagespec) + + +################################################################ +# torch video +################################################################ +def videohandler(extension, data): + if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split(): + return None + + try: + import torchvision.io + except ImportError as e: + raise ModuleNotFoundError("Package `torchvision` is required to be installed for default video file loader." + "Please use `pip install torchvision` or `conda install torchvision -c pytorch`" + "to install the package") from e + + with tempfile.TemporaryDirectory() as dirname: + fname = os.path.join(dirname, f"file.{extension}") + with open(fname, "wb") as stream: + stream.write(data) + return torchvision.io.read_video(fname) + + +################################################################ +# torchaudio +################################################################ +def audiohandler(extension, data): + if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]: + return None + + try: + import torchaudio # type: ignore[import] + except ImportError as e: + raise ModuleNotFoundError("Package `torchaudio` is required to be installed for default audio file loader." + "Please use `pip install torchaudio` or `conda install torchaudio -c pytorch`" + "to install the package") from e + + with tempfile.TemporaryDirectory() as dirname: + fname = os.path.join(dirname, f"file.{extension}") + with open(fname, "wb") as stream: + stream.write(data) + return torchaudio.load(fname) + + +################################################################ +# mat +################################################################ +class MatHandler: + def __init__(self, **loadmat_kwargs) -> None: + try: + import scipy.io as sio + except ImportError as e: + raise ModuleNotFoundError("Package `scipy` is required to be installed for mat file." 
+ "Please use `pip install scipy` or `conda install scipy`" + "to install the package") from e + self.sio = sio + self.loadmat_kwargs = loadmat_kwargs + + def __call__(self, extension, data): + if extension != 'mat': + return None + with io.BytesIO(data) as stream: + return self.sio.loadmat(stream, **self.loadmat_kwargs) + +def mathandler(**loadmat_kwargs): + return MatHandler(**loadmat_kwargs) + + +################################################################ +# a sample decoder +################################################################ +# Extract extension from pathname +def extension_extract_fn(pathname): + ext = os.path.splitext(pathname)[1] + # Remove dot + if ext: + ext = ext[1:] + return ext + + +class Decoder: + """ + Decode key/data sets using a list of handlers. + + For each key/data item, this iterates through the list of + handlers until some handler returns something other than None. + """ + + def __init__(self, *handler, key_fn=extension_extract_fn): + self.handlers = list(handler) if handler else [] + self.key_fn = key_fn + + # Insert new handler from the beginning of handlers list to make sure the new + # handler having the highest priority + def add_handler(self, *handler): + if not handler: + return + self.handlers = list(handler) + self.handlers + + @staticmethod + def _is_stream_handle(data): + obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data + return isinstance(obj_to_check, (io.BufferedIOBase, io.RawIOBase)) + + def decode1(self, key, data): + if not data: + return data + + # if data is a stream handle, we need to read all the content before decoding + if Decoder._is_stream_handle(data): + ds = data + # The behavior of .read can differ between streams (e.g. HTTPResponse), hence this is used instead + data = b"".join(data) + ds.close() + + for f in self.handlers: + result = f(key, data) + if result is not None: + return result + return data + + def decode(self, data): + result = {} + # single data tuple(pathname, data stream) + if isinstance(data, tuple): + data = [data] + + if data is not None: + for k, v in data: + # TODO: xinyu, figure out why Nvidia do this? + if k[0] == "_": + if isinstance(v, bytes): + v = v.decode("utf-8") + result[k] = v + continue + result[k] = self.decode1(self.key_fn(k), v) + return result + + def __call__(self, data): + return self.decode(data) diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py new file mode 100644 index 0000000000000000000000000000000000000000..02487d0da5737363a59bdcd18a4fe16ead2fdcbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py @@ -0,0 +1,58 @@ +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.graph_settings import apply_random_seed + + +# TODO: Caveats +# 1. Caller (either the ReadingService or DataLoader) must pass in the initial RNG +# 2. `in_batch_shuffle` and `bucketbatch` are not compatible with this because they currently +# lack the option to `set_seed`. +def _simple_graph_snapshot_restoration(datapipe: IterDataPipe, n_iterations: int, rng=None) -> None: + r""" + Fast-forward the given DataPipe and its parents by ``n_iterations``, re-doing computations to restore a snapshot. 
+ + For instance, applying this function to the final DataPipe of a graph will restore the snapshot + (via fast-forward) every DataPipe within the graph. + + After you deserialize a DataPipe, you can use its `_number_of_samples_yielded` attribute as the input + to this function to forward the DataPipe. + + A DataPipe cannot be restored twice in a row unless there is an iteration started between the restoration + attempts. + + Note: + This is the simplest but least efficient way to fast-forward a DataPipe. Usage of other fast-forwarding + methods (custom ones if necessary) are recommended. + + Args: + datapipe: IterDataPipe to be fast-forwarded + n_iterations: number of iterations to fast-forward + rng: ``Optional[torch.Generator]``. If not ``None``, this RNG will be used for shuffling. The generator + should be in its `initial` state as it was first passed into ``DataLoader`` or ``ReadingService``. + """ + if datapipe._snapshot_state == _SnapshotState.Restored: + raise RuntimeError( + "Snapshot restoration cannot be applied. You can only restore simple snapshot to the graph " + "if your graph has not been restored.") + + # For this snapshot restoration function, we want the DataPipe to be at its initial state prior to + # simple fast-forwarding. Therefore, we need to call `reset` twice, because if `SnapshotState` is `Restored`, + # the first reset will not actually reset. + datapipe.reset() # This ensures `SnapshotState` is `Iterating` by this point, even if it was `Restored`. + apply_random_seed(datapipe, rng) + + remainder = n_iterations + it = iter(datapipe) # This always reset the DataPipe if it hasn't already. + while remainder > 0: + try: + next(it) + remainder -= 1 + except StopIteration as e: + raise RuntimeError(f"Fast-forward {datapipe} by {n_iterations} iterations " + "exceeds the number of samples available.") from e + datapipe._fast_forward_iterator = it + # While the DataPipe has `_fast_forward_iterator`, `next()` will get result from there instead of elsewhere. + + # This will prevent the DataPipe from resetting in the `iter()` call + # If another DataPipe is consuming it, it won't have to start over again + datapipe._snapshot_state = _SnapshotState.Restored diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/dataset.py b/venv/lib/python3.10/site-packages/torch/utils/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..554bf90d108bdd4e76e1e0e001be960dc9b41255 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/dataset.py @@ -0,0 +1,488 @@ +import bisect +import itertools +import math +import warnings +from typing import ( + cast, + Dict, + Generic, + Iterable, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +# No 'default_generator' in torch/__init__.pyi +from torch import default_generator, randperm + +from ... import Generator, Tensor + +__all__ = [ + "Dataset", + "IterableDataset", + "TensorDataset", + "StackDataset", + "ConcatDataset", + "ChainDataset", + "Subset", + "random_split", +] + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +T_dict = Dict[str, T_co] +T_tuple = Tuple[T_co, ...] +T_stack = TypeVar("T_stack", T_tuple, T_dict) + + +class Dataset(Generic[T_co]): + r"""An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. 
Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + """ + + def __getitem__(self, index) -> T_co: + raise NotImplementedError("Subclasses of Dataset should implement __getitem__.") + + # def __getitems__(self, indices: List) -> List[T_co]: + # Not implemented to prevent false-positives in fetcher check in + # torch.utils.data._utils.fetch._MapDatasetFetcher + + def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]": + return ConcatDataset([self, other]) + + # No `def __len__(self)` default? + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # in pytorch/torch/utils/data/sampler.py + + +class IterableDataset(Dataset[T_co], Iterable[T_co]): + r"""An iterable Dataset. + + All datasets that represent an iterable of data samples should subclass it. + Such form of datasets is particularly useful when data come from a stream. + + All subclasses should overwrite :meth:`__iter__`, which would return an + iterator of samples in this dataset. + + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the dataset object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. + + Example 1: splitting workload across all workers in :meth:`__iter__`:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER) + >>> # xdoctest: +SKIP("Fails on MacOS12") + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... worker_info = torch.utils.data.get_worker_info() + ... if worker_info is None: # single-process data loading, return the full iterator + ... iter_start = self.start + ... iter_end = self.end + ... else: # in a worker process + ... # split workload + ... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... iter_start = self.start + worker_id * per_worker + ... iter_end = min(iter_start + per_worker, self.end) + ... return iter(range(iter_start, iter_end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. 
+ >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [tensor([3]), tensor([4]), tensor([5]), tensor([6])] + + >>> # xdoctest: +REQUIRES(POSIX) + >>> # Mult-process loading with two worker processes + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> # xdoctest: +IGNORE_WANT("non deterministic") + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [tensor([3]), tensor([5]), tensor([4]), tensor([6])] + + >>> # With even more workers + >>> # xdoctest: +IGNORE_WANT("non deterministic") + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12))) + [tensor([3]), tensor([5]), tensor([4]), tensor([6])] + + Example 2: splitting workload across all workers using :attr:`worker_init_fn`:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER) + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. + >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [3, 4, 5, 6] + >>> + >>> # Directly doing multi-process loading yields duplicate data + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [3, 3, 4, 4, 5, 5, 6, 6] + + >>> # Define a `worker_init_fn` that configures each dataset copy differently + >>> def worker_init_fn(worker_id): + ... worker_info = torch.utils.data.get_worker_info() + ... dataset = worker_info.dataset # the dataset copy in this worker process + ... overall_start = dataset.start + ... overall_end = dataset.end + ... # configure the dataset to only process the split workload + ... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... dataset.start = overall_start + worker_id * per_worker + ... dataset.end = min(dataset.start + per_worker, overall_end) + ... + + >>> # Mult-process loading with the custom `worker_init_fn` + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn))) + [3, 5, 4, 6] + + >>> # With even more workers + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn))) + [3, 4, 5, 6] + """ + + def __add__(self, other: Dataset[T_co]): + return ChainDataset([self, other]) + + # No `def __len__(self)` default? Subclasses raise `TypeError` when needed. + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + + +class TensorDataset(Dataset[Tuple[Tensor, ...]]): + r"""Dataset wrapping tensors. + + Each sample will be retrieved by indexing tensors along the first dimension. + + Args: + *tensors (Tensor): tensors that have the same size of the first dimension. + """ + + tensors: Tuple[Tensor, ...] 
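# Editorial sketch (not part of the diff above): minimal TensorDataset usage with
# assumed small in-memory tensors. Every wrapped tensor must share the same size
# along dim 0, and indexing returns one slice per tensor.
import torch
from torch.utils.data import DataLoader, TensorDataset

features = torch.randn(100, 8)        # 100 samples, 8 features each
labels = torch.randint(0, 2, (100,))  # 100 binary targets
ds = TensorDataset(features, labels)

x0, y0 = ds[0]                        # (features[0], labels[0])
loader = DataLoader(ds, batch_size=32, shuffle=True)
xb, yb = next(iter(loader))           # xb: [32, 8], yb: [32]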
+ + def __init__(self, *tensors: Tensor) -> None: + assert all( + tensors[0].size(0) == tensor.size(0) for tensor in tensors + ), "Size mismatch between tensors" + self.tensors = tensors + + def __getitem__(self, index): + return tuple(tensor[index] for tensor in self.tensors) + + def __len__(self): + return self.tensors[0].size(0) + + +class StackDataset(Dataset[T_stack]): + r"""Dataset as a stacking of multiple datasets. + + This class is useful to assemble different parts of complex input data, given as datasets. + + Example: + >>> # xdoctest: +SKIP + >>> images = ImageDataset() + >>> texts = TextDataset() + >>> tuple_stack = StackDataset(images, texts) + >>> tuple_stack[0] == (images[0], texts[0]) + >>> dict_stack = StackDataset(image=images, text=texts) + >>> dict_stack[0] == {'image': images[0], 'text': texts[0]} + + Args: + *args (Dataset): Datasets for stacking returned as tuple. + **kwargs (Dataset): Datasets for stacking returned as dict. + """ + + datasets: Union[tuple, dict] + + def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None: + if args: + if kwargs: + raise ValueError( + "Supported either ``tuple``- (via ``args``) or" + "``dict``- (via ``kwargs``) like input/output, but both types are given." + ) + self._length = len(args[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in args): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = args + elif kwargs: + tmp = list(kwargs.values()) + self._length = len(tmp[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in tmp): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = kwargs + else: + raise ValueError("At least one dataset should be passed") + + def __getitem__(self, index): + if isinstance(self.datasets, dict): + return {k: dataset[index] for k, dataset in self.datasets.items()} + return tuple(dataset[index] for dataset in self.datasets) + + def __getitems__(self, indices: list): + # add batched sampling support when parent datasets supports it. + if isinstance(self.datasets, dict): + dict_batch: List[T_dict] = [{} for _ in indices] + for k, dataset in self.datasets.items(): + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." + f" Expected {len(indices)}, got {len(items)}" + ) + for data, d_sample in zip(items, dict_batch): + d_sample[k] = data + else: + for idx, d_sample in zip(indices, dict_batch): + d_sample[k] = dataset[idx] + return dict_batch + + # tuple data + list_batch: List[list] = [[] for _ in indices] + for dataset in self.datasets: + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." + f" Expected {len(indices)}, got {len(items)}" + ) + for data, t_sample in zip(items, list_batch): + t_sample.append(data) + else: + for idx, t_sample in zip(indices, list_batch): + t_sample.append(dataset[idx]) + tuple_batch: List[T_tuple] = [tuple(sample) for sample in list_batch] + return tuple_batch + + def __len__(self): + return self._length + + +class ConcatDataset(Dataset[T_co]): + r"""Dataset as a concatenation of multiple datasets. + + This class is useful to assemble different existing datasets. 
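# Editorial sketch (not part of the diff above): how ConcatDataset resolves a global
# index via bisect over `cumulative_sizes`, using two tiny TensorDatasets as stand-ins.
import torch
from torch.utils.data import ConcatDataset, TensorDataset

a = TensorDataset(torch.arange(3))       # global indices 0..2
b = TensorDataset(torch.arange(10, 15))  # global indices 3..7 map to b[0..4]
cat = ConcatDataset([a, b])

assert len(cat) == 8
assert cat.cumulative_sizes == [3, 8]
assert cat[4][0].item() == 11            # 4 falls in b: sample_idx = 4 - 3 = 1 -> 11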
+ + Args: + datasets (sequence): List of datasets to be concatenated + """ + + datasets: List[Dataset[T_co]] + cumulative_sizes: List[int] + + @staticmethod + def cumsum(sequence): + r, s = [], 0 + for e in sequence: + l = len(e) + r.append(l + s) + s += l + return r + + def __init__(self, datasets: Iterable[Dataset]) -> None: + super().__init__() + self.datasets = list(datasets) + assert len(self.datasets) > 0, "datasets should not be an empty iterable" # type: ignore[arg-type] + for d in self.datasets: + assert not isinstance( + d, IterableDataset + ), "ConcatDataset does not support IterableDataset" + self.cumulative_sizes = self.cumsum(self.datasets) + + def __len__(self): + return self.cumulative_sizes[-1] + + def __getitem__(self, idx): + if idx < 0: + if -idx > len(self): + raise ValueError( + "absolute value of index should not exceed dataset length" + ) + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx][sample_idx] + + @property + def cummulative_sizes(self): + warnings.warn( + "cummulative_sizes attribute is renamed to " "cumulative_sizes", + DeprecationWarning, + stacklevel=2, + ) + return self.cumulative_sizes + + +class ChainDataset(IterableDataset): + r"""Dataset for chaining multiple :class:`IterableDataset` s. + + This class is useful to assemble different existing dataset streams. The + chaining operation is done on-the-fly, so concatenating large-scale + datasets with this class will be efficient. + + Args: + datasets (iterable of IterableDataset): datasets to be chained together + """ + + def __init__(self, datasets: Iterable[Dataset]) -> None: + super().__init__() + self.datasets = datasets + + def __iter__(self): + for d in self.datasets: + assert isinstance( + d, IterableDataset + ), "ChainDataset only supports IterableDataset" + yield from d + + def __len__(self): + total = 0 + for d in self.datasets: + assert isinstance( + d, IterableDataset + ), "ChainDataset only supports IterableDataset" + total += len(d) # type: ignore[arg-type] + return total + + +class Subset(Dataset[T_co]): + r""" + Subset of a dataset at specified indices. + + Args: + dataset (Dataset): The whole Dataset + indices (sequence): Indices in the whole set selected for subset + """ + + dataset: Dataset[T_co] + indices: Sequence[int] + + def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None: + self.dataset = dataset + self.indices = indices + + def __getitem__(self, idx): + if isinstance(idx, list): + return self.dataset[[self.indices[i] for i in idx]] + return self.dataset[self.indices[idx]] + + def __getitems__(self, indices: List[int]) -> List[T_co]: + # add batched sampling support when parent dataset supports it. + # see torch.utils.data._utils.fetch._MapDatasetFetcher + if callable(getattr(self.dataset, "__getitems__", None)): + return self.dataset.__getitems__([self.indices[idx] for idx in indices]) # type: ignore[attr-defined] + else: + return [self.dataset[self.indices[idx]] for idx in indices] + + def __len__(self): + return len(self.indices) + + +def random_split( + dataset: Dataset[T], + lengths: Sequence[Union[int, float]], + generator: Optional[Generator] = default_generator, +) -> List[Subset[T]]: + r""" + Randomly split a dataset into non-overlapping new datasets of given lengths. 
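# Editorial sketch (not part of the diff above): Subset simply re-maps indices into
# the parent dataset, so subset[i] == parent[indices[i]].
import torch
from torch.utils.data import Subset, TensorDataset

parent = TensorDataset(torch.arange(10) * 10)
evens = Subset(parent, indices=[0, 2, 4, 6, 8])

assert len(evens) == 5
assert evens[3][0].item() == 60   # indices[3] == 6 -> parent[6] == 60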
+ + If a list of fractions that sum up to 1 is given, + the lengths will be computed automatically as + floor(frac * len(dataset)) for each fraction provided. + + After computing the lengths, if there are any remainders, 1 count will be + distributed in round-robin fashion to the lengths + until there are no remainders left. + + Optionally fix the generator for reproducible results, e.g.: + + Example: + >>> # xdoctest: +SKIP + >>> generator1 = torch.Generator().manual_seed(42) + >>> generator2 = torch.Generator().manual_seed(42) + >>> random_split(range(10), [3, 7], generator=generator1) + >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2) + + Args: + dataset (Dataset): Dataset to be split + lengths (sequence): lengths or fractions of splits to be produced + generator (Generator): Generator used for the random permutation. + """ + if math.isclose(sum(lengths), 1) and sum(lengths) <= 1: + subset_lengths: List[int] = [] + for i, frac in enumerate(lengths): + if frac < 0 or frac > 1: + raise ValueError(f"Fraction at index {i} is not between 0 and 1") + n_items_in_split = int( + math.floor(len(dataset) * frac) # type: ignore[arg-type] + ) + subset_lengths.append(n_items_in_split) + remainder = len(dataset) - sum(subset_lengths) # type: ignore[arg-type] + # add 1 to all the lengths in round-robin fashion until the remainder is 0 + for i in range(remainder): + idx_to_add_at = i % len(subset_lengths) + subset_lengths[idx_to_add_at] += 1 + lengths = subset_lengths + for i, length in enumerate(lengths): + if length == 0: + warnings.warn( + f"Length of split at index {i} is 0. " + f"This might result in an empty dataset." + ) + + # Cannot verify that dataset is Sized + if sum(lengths) != len(dataset): # type: ignore[arg-type] + raise ValueError( + "Sum of input lengths does not equal the length of the input dataset!" + ) + + indices = randperm(sum(lengths), generator=generator).tolist() # type: ignore[arg-type, call-overload] + lengths = cast(Sequence[int], lengths) + return [ + Subset(dataset, indices[offset - length : offset]) + for offset, length in zip(itertools.accumulate(lengths), lengths) + ] diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/distributed.py b/venv/lib/python3.10/site-packages/torch/utils/data/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2141b8430fc59a92a0a07466630708b8cb4be7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/distributed.py @@ -0,0 +1,137 @@ +import math +from typing import TypeVar, Optional, Iterator + +import torch +from . import Sampler, Dataset +import torch.distributed as dist + +__all__ = ["DistributedSampler", ] + +T_co = TypeVar('T_co', covariant=True) + + +class DistributedSampler(Sampler[T_co]): + r"""Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each + process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a + :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the + original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size and that any instance of it always + returns the same elements in the same order. + + Args: + dataset: Dataset used for sampling. + num_replicas (int, optional): Number of processes participating in + distributed training. By default, :attr:`world_size` is retrieved from the + current distributed group. 
+ rank (int, optional): Rank of the current process within :attr:`num_replicas`. + By default, :attr:`rank` is retrieved from the current distributed + group. + shuffle (bool, optional): If ``True`` (default), sampler will shuffle the + indices. + seed (int, optional): random seed used to shuffle the sampler if + :attr:`shuffle=True`. This number should be identical across all + processes in the distributed group. Default: ``0``. + drop_last (bool, optional): if ``True``, then the sampler will drop the + tail of the data to make it evenly divisible across the number of + replicas. If ``False``, the sampler will add extra indices to make + the data evenly divisible across the replicas. Default: ``False``. + + .. warning:: + In distributed mode, calling the :meth:`set_epoch` method at + the beginning of each epoch **before** creating the :class:`DataLoader` iterator + is necessary to make shuffling work properly across multiple epochs. Otherwise, + the same ordering will be always used. + + Example:: + + >>> # xdoctest: +SKIP + >>> sampler = DistributedSampler(dataset) if is_distributed else None + >>> loader = DataLoader(dataset, shuffle=(sampler is None), + ... sampler=sampler) + >>> for epoch in range(start_epoch, n_epochs): + ... if is_distributed: + ... sampler.set_epoch(epoch) + ... train(loader) + """ + + def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None, + rank: Optional[int] = None, shuffle: bool = True, + seed: int = 0, drop_last: bool = False) -> None: + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + if rank >= num_replicas or rank < 0: + raise ValueError( + f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]") + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.drop_last = drop_last + # If the dataset length is evenly divisible by # of replicas, then there + # is no need to drop any data, since the dataset will be split equally. + if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type] + # Split to nearest available length that is evenly divisible. + # This is to ensure each rank receives the same amount of data when + # using this Sampler. + self.num_samples = math.ceil( + (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type] + ) + else: + self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type] + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + self.seed = seed + + def __iter__(self) -> Iterator[T_co]: + if self.shuffle: + # deterministically shuffle based on epoch and seed + g = torch.Generator() + g.manual_seed(self.seed + self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type] + else: + indices = list(range(len(self.dataset))) # type: ignore[arg-type] + + if not self.drop_last: + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + if padding_size <= len(indices): + indices += indices[:padding_size] + else: + indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size] + else: + # remove tail of data to make it evenly divisible. 
+ indices = indices[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + r""" + Set the epoch for this sampler. + + When :attr:`shuffle=True`, this ensures all replicas + use a different random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + + Args: + epoch (int): Epoch number. + """ + self.epoch = epoch diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/graph.py b/venv/lib/python3.10/site-packages/torch/utils/data/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..cd78db474d5e06187b8b7ec63da6e33b7619f798 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/graph.py @@ -0,0 +1,149 @@ +import io +import pickle +import warnings + +from collections.abc import Collection +from typing import Dict, List, Optional, Set, Tuple, Type, Union + +from torch.utils.data import IterDataPipe, MapDataPipe +from torch.utils._import_utils import dill_available + + +__all__ = ["traverse", "traverse_dps"] + +DataPipe = Union[IterDataPipe, MapDataPipe] +DataPipeGraph = Dict[int, Tuple[DataPipe, "DataPipeGraph"]] # type: ignore[misc] + + +def _stub_unpickler(): + return "STUB" + + +# TODO(VitalyFedyunin): Make sure it works without dill module installed +def _list_connected_datapipes(scan_obj: DataPipe, only_datapipe: bool, cache: Set[int]) -> List[DataPipe]: + f = io.BytesIO() + p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is + if dill_available(): + from dill import Pickler as dill_Pickler + d = dill_Pickler(f) + else: + d = None + + captured_connections = [] + + def getstate_hook(ori_state): + state = None + if isinstance(ori_state, dict): + state = {} # type: ignore[assignment] + for k, v in ori_state.items(): + if isinstance(v, (IterDataPipe, MapDataPipe, Collection)): + state[k] = v # type: ignore[attr-defined] + elif isinstance(ori_state, (tuple, list)): + state = [] # type: ignore[assignment] + for v in ori_state: + if isinstance(v, (IterDataPipe, MapDataPipe, Collection)): + state.append(v) # type: ignore[attr-defined] + elif isinstance(ori_state, (IterDataPipe, MapDataPipe, Collection)): + state = ori_state # type: ignore[assignment] + return state + + def reduce_hook(obj): + if obj == scan_obj or id(obj) in cache: + raise NotImplementedError + else: + captured_connections.append(obj) + # Adding id to remove duplicate DataPipe serialized at the same level + cache.add(id(obj)) + return _stub_unpickler, () + + datapipe_classes: Tuple[Type[DataPipe]] = (IterDataPipe, MapDataPipe) # type: ignore[assignment] + + try: + for cls in datapipe_classes: + cls.set_reduce_ex_hook(reduce_hook) + if only_datapipe: + cls.set_getstate_hook(getstate_hook) + try: + p.dump(scan_obj) + except (pickle.PickleError, AttributeError, TypeError): + if dill_available(): + d.dump(scan_obj) + else: + raise + finally: + for cls in datapipe_classes: + cls.set_reduce_ex_hook(None) + if only_datapipe: + cls.set_getstate_hook(None) + if dill_available(): + from dill import extend as dill_extend + dill_extend(False) # Undo change to dispatch table + return captured_connections + + +def traverse_dps(datapipe: DataPipe) -> DataPipeGraph: + r""" + Traverse the DataPipes and their attributes to extract the 
DataPipe graph. + + This only looks into the attribute from each DataPipe that is either a + DataPipe and a Python collection object such as ``list``, ``tuple``, + ``set`` and ``dict``. + + Args: + datapipe: the end DataPipe of the graph + Returns: + A graph represented as a nested dictionary, where keys are ids of DataPipe instances + and values are tuples of DataPipe instance and the sub-graph + """ + cache: Set[int] = set() + return _traverse_helper(datapipe, only_datapipe=True, cache=cache) + + +def traverse(datapipe: DataPipe, only_datapipe: Optional[bool] = None) -> DataPipeGraph: + r""" + Traverse the DataPipes and their attributes to extract the DataPipe graph. + + [Deprecated] + When ``only_dataPipe`` is specified as ``True``, it would only look into the + attribute from each DataPipe that is either a DataPipe and a Python collection object + such as ``list``, ``tuple``, ``set`` and ``dict``. + + Note: + This function is deprecated. Please use `traverse_dps` instead. + + Args: + datapipe: the end DataPipe of the graph + only_datapipe: If ``False`` (default), all attributes of each DataPipe are traversed. + This argument is deprecating and will be removed after the next release. + Returns: + A graph represented as a nested dictionary, where keys are ids of DataPipe instances + and values are tuples of DataPipe instance and the sub-graph + """ + msg = "`traverse` function and will be removed after 1.13. " \ + "Please use `traverse_dps` instead." + if not only_datapipe: + msg += " And, the behavior will be changed to the equivalent of `only_datapipe=True`." + warnings.warn(msg, FutureWarning) + if only_datapipe is None: + only_datapipe = False + cache: Set[int] = set() + return _traverse_helper(datapipe, only_datapipe, cache) + + +# Add cache here to prevent infinite recursion on DataPipe +def _traverse_helper(datapipe: DataPipe, only_datapipe: bool, cache: Set[int]) -> DataPipeGraph: + if not isinstance(datapipe, (IterDataPipe, MapDataPipe)): + raise RuntimeError(f"Expected `IterDataPipe` or `MapDataPipe`, but {type(datapipe)} is found") + + dp_id = id(datapipe) + if dp_id in cache: + return {} + cache.add(dp_id) + # Using cache.copy() here is to prevent the same DataPipe pollutes the cache on different paths + items = _list_connected_datapipes(datapipe, only_datapipe, cache.copy()) + d: DataPipeGraph = {dp_id: (datapipe, {})} + for item in items: + # Using cache.copy() here is to prevent recursion on a single path rather than global graph + # Single DataPipe can present multiple times in different paths in graph + d[dp_id][1].update(_traverse_helper(item, only_datapipe, cache.copy())) + return d diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/graph_settings.py b/venv/lib/python3.10/site-packages/torch/utils/data/graph_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..4b42cc6065a788e18afd38aea6fe6cdf63214430 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/graph_settings.py @@ -0,0 +1,160 @@ +import inspect +import warnings + +from typing import Any, List, Optional, Set + +import torch + +from torch.utils.data.datapipes.iter.sharding import ( + _ShardingIterDataPipe, + SHARDING_PRIORITIES, +) +from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps + +__all__ = [ + "apply_random_seed", + "apply_sharding", + "apply_shuffle_seed", + "apply_shuffle_settings", + "get_all_graph_pipes", +] + + +def get_all_graph_pipes(graph: DataPipeGraph) -> List[DataPipe]: + return _get_all_graph_pipes_helper(graph, 
set()) + + +def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> List[DataPipe]: + results: List[DataPipe] = [] + for dp_id, (datapipe, sub_graph) in graph.items(): + if dp_id in id_cache: + continue + id_cache.add(dp_id) + results.append(datapipe) + results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache)) + return results + + +def _is_sharding_datapipe(datapipe: DataPipe) -> bool: + if isinstance(datapipe, _ShardingIterDataPipe): + return True + if hasattr(datapipe, "apply_sharding") and inspect.ismethod(datapipe.apply_sharding): + return True + return False + + +def apply_sharding(datapipe: DataPipe, + num_of_instances: int, + instance_id: int, + sharding_group=SHARDING_PRIORITIES.DEFAULT) -> DataPipe: + r""" + Apply dynamic sharding over the ``sharding_filter`` DataPipe that has a method ``apply_sharding``. + + RuntimeError will be raised when multiple ``sharding_filter`` are presented in the same branch. + """ + graph = traverse_dps(datapipe) + + def _helper(graph, prev_applied=None): + for (dp, sub_graph) in graph.values(): + applied = None + if _is_sharding_datapipe(dp): + if prev_applied is not None: + raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. " + f"Sharding already applied to {prev_applied} while trying to apply to {dp}") + # For BC, only provide sharding_group if accepted + sig = inspect.signature(dp.apply_sharding) + if len(sig.parameters) < 3: + dp.apply_sharding(num_of_instances, instance_id) + else: + dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group) + applied = dp + if applied is None: + applied = prev_applied + _helper(sub_graph, applied) + + _helper(graph) + + return datapipe + + +def _is_shuffle_datapipe(datapipe: DataPipe) -> bool: + if not hasattr(datapipe, "set_shuffle") or not hasattr(datapipe, "set_seed"): + return False + if not inspect.ismethod(datapipe.set_shuffle) or not inspect.ismethod(datapipe.set_seed): + return False + return True + + +def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find and set shuffle attribute. + + Apply the method to each `DataPipe` that has APIs of ``set_shuffle`` + and ``set_seed``. + + Args: + datapipe: DataPipe that needs to set shuffle attribute + shuffle: Shuffle option (default: ``None`` and no-op to the graph) + """ + if shuffle is None: + return datapipe + + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)] + if not shufflers and shuffle: + warnings.warn( + "`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. " + "Be aware that the default buffer size might not be sufficient for your task." + ) + datapipe = datapipe.shuffle() + shufflers = [datapipe, ] # type: ignore[list-item] + + for shuffler in shufflers: + shuffler.set_shuffle(shuffle) + + return datapipe + + +def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe: + warnings.warn( + "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases." + "\nPlease use `apply_random_seed` instead." 
+ ) + return apply_random_seed(datapipe, rng) + + +def _is_random_datapipe(datapipe: DataPipe) -> bool: + if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed): + return True + return False + + +def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find random ``DataPipe`` with an API of ``set_seed``. + + Then set the random seed based on the provided RNG to those ``DataPipe``. + + Args: + datapipe: DataPipe that needs to set randomness + rng: Random number generator to generate random seeds + """ + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + # Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once. + # And, `id` is used in case of unhashable DataPipe + cache = set() + random_datapipes = [] + for pipe in all_pipes: + if id(pipe) in cache: + continue + if _is_random_datapipe(pipe): + random_datapipes.append(pipe) + cache.add(id(pipe)) + + for pipe in random_datapipes: + random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item()) + pipe.set_seed(random_seed) + + return datapipe diff --git a/venv/lib/python3.10/site-packages/torch/utils/data/sampler.py b/venv/lib/python3.10/site-packages/torch/utils/data/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd91d0ac1576fad6356062bb1f82deea9cd77ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/data/sampler.py @@ -0,0 +1,305 @@ +import torch +from torch import Tensor + +from typing import Iterator, Iterable, Optional, Sequence, List, TypeVar, Generic, Sized, Union + +__all__ = [ + "BatchSampler", + "RandomSampler", + "Sampler", + "SequentialSampler", + "SubsetRandomSampler", + "WeightedRandomSampler", +] + +T_co = TypeVar('T_co', covariant=True) + + +class Sampler(Generic[T_co]): + r"""Base class for all Samplers. + + Every Sampler subclass has to provide an :meth:`__iter__` method, providing a + way to iterate over indices or lists of indices (batches) of dataset elements, and a :meth:`__len__` method + that returns the length of the returned iterators. + + Args: + data_source (Dataset): This argument is not used and will be removed in 2.2.0. + You may still have custom implementation that utilizes it. + + Example: + >>> # xdoctest: +SKIP + >>> class AccedingSequenceLengthSampler(Sampler[int]): + >>> def __init__(self, data: List[str]) -> None: + >>> self.data = data + >>> + >>> def __len__(self) -> int: + >>> return len(self.data) + >>> + >>> def __iter__(self) -> Iterator[int]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> yield from torch.argsort(sizes).tolist() + >>> + >>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]): + >>> def __init__(self, data: List[str], batch_size: int) -> None: + >>> self.data = data + >>> self.batch_size = batch_size + >>> + >>> def __len__(self) -> int: + >>> return (len(self.data) + self.batch_size - 1) // self.batch_size + >>> + >>> def __iter__(self) -> Iterator[List[int]]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> for batch in torch.chunk(torch.argsort(sizes), len(self)): + >>> yield batch.tolist() + + .. note:: The :meth:`__len__` method isn't strictly required by + :class:`~torch.utils.data.DataLoader`, but is expected in any + calculation involving the length of a :class:`~torch.utils.data.DataLoader`. 
+ """ + + def __init__(self, data_source: Optional[Sized] = None) -> None: + if data_source is not None: + import warnings + + warnings.warn("`data_source` argument is not used and will be removed in 2.2.0." + "You may still have custom implementation that utilizes it.") + + def __iter__(self) -> Iterator[T_co]: + raise NotImplementedError + + # NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # + # Many times we have an abstract class representing a collection/iterable of + # data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally + # implementing a `__len__` method. In such cases, we must make sure to not + # provide a default implementation, because both straightforward default + # implementations have their issues: + # + # + `return NotImplemented`: + # Calling `len(subclass_instance)` raises: + # TypeError: 'NotImplementedType' object cannot be interpreted as an integer + # + # + `raise NotImplementedError()`: + # This prevents triggering some fallback behavior. E.g., the built-in + # `list(X)` tries to call `len(X)` first, and executes a different code + # path if the method is not found or `NotImplemented` is returned, while + # raising a `NotImplementedError` will propagate and make the call fail + # where it could have used `__iter__` to complete the call. + # + # Thus, the only two sensible things to do are + # + # + **not** provide a default `__len__`. + # + # + raise a `TypeError` instead, which is what Python uses when users call + # a method that is not defined on an object. + # (@ssnl verifies that this works on at least Python 3.7.) + + +class SequentialSampler(Sampler[int]): + r"""Samples elements sequentially, always in the same order. + + Args: + data_source (Dataset): dataset to sample from + """ + + data_source: Sized + + def __init__(self, data_source: Sized) -> None: + self.data_source = data_source + + def __iter__(self) -> Iterator[int]: + return iter(range(len(self.data_source))) + + def __len__(self) -> int: + return len(self.data_source) + + +class RandomSampler(Sampler[int]): + r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset. + + If with replacement, then user can specify :attr:`num_samples` to draw. + + Args: + data_source (Dataset): dataset to sample from + replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False`` + num_samples (int): number of samples to draw, default=`len(dataset)`. + generator (Generator): Generator used in sampling. 
+ """ + + data_source: Sized + replacement: bool + + def __init__(self, data_source: Sized, replacement: bool = False, + num_samples: Optional[int] = None, generator=None) -> None: + self.data_source = data_source + self.replacement = replacement + self._num_samples = num_samples + self.generator = generator + + if not isinstance(self.replacement, bool): + raise TypeError(f"replacement should be a boolean value, but got replacement={self.replacement}") + + if not isinstance(self.num_samples, int) or self.num_samples <= 0: + raise ValueError(f"num_samples should be a positive integer value, but got num_samples={self.num_samples}") + + @property + def num_samples(self) -> int: + # dataset size might change at runtime + if self._num_samples is None: + return len(self.data_source) + return self._num_samples + + def __iter__(self) -> Iterator[int]: + n = len(self.data_source) + if self.generator is None: + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + generator = torch.Generator() + generator.manual_seed(seed) + else: + generator = self.generator + + if self.replacement: + for _ in range(self.num_samples // 32): + yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist() + yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist() + else: + for _ in range(self.num_samples // n): + yield from torch.randperm(n, generator=generator).tolist() + yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n] + + def __len__(self) -> int: + return self.num_samples + + +class SubsetRandomSampler(Sampler[int]): + r"""Samples elements randomly from a given list of indices, without replacement. + + Args: + indices (sequence): a sequence of indices + generator (Generator): Generator used in sampling. + """ + + indices: Sequence[int] + + def __init__(self, indices: Sequence[int], generator=None) -> None: + self.indices = indices + self.generator = generator + + def __iter__(self) -> Iterator[int]: + for i in torch.randperm(len(self.indices), generator=self.generator): + yield self.indices[i] + + def __len__(self) -> int: + return len(self.indices) + + +class WeightedRandomSampler(Sampler[int]): + r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights). + + Args: + weights (sequence) : a sequence of weights, not necessary summing up to one + num_samples (int): number of samples to draw + replacement (bool): if ``True``, samples are drawn with replacement. + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + generator (Generator): Generator used in sampling. 
+ + Example: + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)) + [4, 4, 1, 4, 5] + >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False)) + [0, 1, 4, 3, 2] + """ + + weights: Tensor + num_samples: int + replacement: bool + + def __init__(self, weights: Sequence[float], num_samples: int, + replacement: bool = True, generator=None) -> None: + if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \ + num_samples <= 0: + raise ValueError(f"num_samples should be a positive integer value, but got num_samples={num_samples}") + if not isinstance(replacement, bool): + raise ValueError(f"replacement should be a boolean value, but got replacement={replacement}") + + weights_tensor = torch.as_tensor(weights, dtype=torch.double) + if len(weights_tensor.shape) != 1: + raise ValueError("weights should be a 1d sequence but given " + f"weights have shape {tuple(weights_tensor.shape)}") + + self.weights = weights_tensor + self.num_samples = num_samples + self.replacement = replacement + self.generator = generator + + def __iter__(self) -> Iterator[int]: + rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator) + yield from iter(rand_tensor.tolist()) + + def __len__(self) -> int: + return self.num_samples + + +class BatchSampler(Sampler[List[int]]): + r"""Wraps another sampler to yield a mini-batch of indices. + + Args: + sampler (Sampler or Iterable): Base sampler. Can be any iterable object + batch_size (int): Size of mini-batch. + drop_last (bool): If ``True``, the sampler will drop the last batch if + its size would be less than ``batch_size`` + + Example: + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None: + # Since collections.abc.Iterable does not check for `__getitem__`, which + # is one way for an object to be an iterable, we don't do an `isinstance` + # check here. + if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \ + batch_size <= 0: + raise ValueError(f"batch_size should be a positive integer value, but got batch_size={batch_size}") + if not isinstance(drop_last, bool): + raise ValueError(f"drop_last should be a boolean value, but got drop_last={drop_last}") + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + def __iter__(self) -> Iterator[List[int]]: + # Implemented based on the benchmarking in https://github.com/pytorch/pytorch/pull/76951 + if self.drop_last: + sampler_iter = iter(self.sampler) + while True: + try: + batch = [next(sampler_iter) for _ in range(self.batch_size)] + yield batch + except StopIteration: + break + else: + batch = [0] * self.batch_size + idx_in_batch = 0 + for idx in self.sampler: + batch[idx_in_batch] = idx + idx_in_batch += 1 + if idx_in_batch == self.batch_size: + yield batch + idx_in_batch = 0 + batch = [0] * self.batch_size + if idx_in_batch > 0: + yield batch[:idx_in_batch] + + def __len__(self) -> int: + # Can only be called if self.sampler has __len__ implemented + # We cannot enforce this condition, so we turn off typechecking for the + # implementation below. 
+ # Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + if self.drop_last: + return len(self.sampler) // self.batch_size # type: ignore[arg-type] + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type] diff --git a/venv/lib/python3.10/site-packages/torch/utils/viz/__init__.py b/venv/lib/python3.10/site-packages/torch/utils/viz/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bae99e5a90969e5581d56ba28812856265b58afc Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9470ff388bc2fb2e15ebeec64cbb0bd7cc95f40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/utils/viz/_cycles.py b/venv/lib/python3.10/site-packages/torch/utils/viz/_cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..f17348e401c34a74d60337eb21009aebea278dd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/utils/viz/_cycles.py @@ -0,0 +1,447 @@ +import gc +import sys +from typing import Any, Dict, List, NamedTuple, Optional, Tuple +import types +import weakref +import json +from tempfile import NamedTemporaryFile +import torch +from torch.cuda._memory_viz import _frames_fmt, _block_extra +import atexit +import logging +logger = logging.getLogger(__name__) + +def observe_garbage(observer): + enabled = True + + def disable(): + # when GC runs during exit, things like `sys` will already be unloaded + # so we have to disable the callback to avoid hitting errors. + nonlocal enabled + enabled = False + atexit.register(disable) + + def gc_callback(phase, info): + nonlocal enabled + if not enabled: + return + if phase == "start": + gc.set_debug(gc.DEBUG_SAVEALL) + elif phase == "stop": + orig_trace = sys.getprofile() + self_return = [False] + + def do_collect(*args, **kwargs): + nonlocal enabled + if not self_return[0]: + self_return[0] = True + else: + sys.setprofile(orig_trace) + enabled = False + try: + # things in gc.garbage have survived a collection + # so to free them we have to collect a generation greater than them + # but that might _also_ free other stuff and we don't want to miss + # that stuff. So we have to now force gc at the highest level here, + # report all of what we found, _then_ we can free it up. + if info['generation'] != 2: + gc.collect() + observer(gc.garbage) + gc.garbage.clear() + # we have to re-run GC to clean up the cycles + # we saved from before. 
+                        gc.set_debug(0)
+                        before = torch.cuda.memory_allocated()
+                        gc.collect()
+                        after = torch.cuda.memory_allocated()
+                        if before != after:
+                            logger.warning("CUDA Memory changed during GC, %d bytes freed.", before - after)
+                    finally:
+                        enabled = True
+                if orig_trace is not None:
+                    return orig_trace(*args, **kwargs)
+            sys.setprofile(do_collect)
+
+    gc.callbacks.append(gc_callback)
+
+    # provide a way to disarm the callback
+    def remove():
+        gc.callbacks.remove(gc_callback)
+    return remove
+
+# Function to visualize cycles adapted from refcycle:
+# Copyright 2013 Mark Dickinson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+def _get_cell_type():
+    def f(x=None):
+        return lambda: x
+    return type(f().__closure__[0])
+
+CellType = _get_cell_type()
+
+def annotated_references(obj):
+    """
+    Return known information about references held by the given object.
+
+    Returns a mapping from referents to lists of descriptions. Note that there
+    may be more than one edge leading to any particular referent; hence the
+    need for a list. Descriptions are currently strings.
+
+    """
+    references: Dict[int, List[str]] = {}
+
+    def add_reference(name, obj):
+        references.setdefault(id(obj), []).append(name)
+
+    def add_attrs(*attrs):
+        for attr in attrs:
+            if hasattr(obj, attr):
+                add_reference(attr, getattr(obj, attr))
+
+    def add_cell_references():
+        try:
+            add_attrs("cell_contents")
+        except ValueError:
+            # if cell_contents is empty,
+            # accessing it raises ValueError
+            # in this case there is no object to
+            # annotate
+            pass
+
+    def add_function_references():
+        add_attrs("__defaults__",
+                  "__closure__",
+                  "__globals__",
+                  "__code__",
+                  "__name__",
+                  "__module__",
+                  "__doc__",
+                  "__qualname__",
+                  "__annotations__",
+                  "__kwdefaults__")
+
+    def add_sequence_references():
+        for position, item in enumerate(obj):
+            add_reference(f"[{position}]", item)
+
+    def add_dict_references():
+        for key, value in obj.items():
+            add_reference("key", key)
+            add_reference(f"[{repr(key)}]", value)
+
+    def add_set_references():
+        for elt in obj:
+            add_reference("element", elt)
+
+    def add_bound_method_references():
+        add_attrs("__self__", "__func__", "im_class")
+
+    def add_weakref_references():
+        # For subclasses of weakref, we can't reliably distinguish the
+        # callback (if any) from other attributes.
+        if type(obj) is weakref.ref:
+            referents = gc.get_referents(obj)
+            if len(referents) == 1:
+                target = referents[0]
+                add_reference("__callback__", target)
+
+    def add_frame_references():
+        f_locals = obj.f_locals
+        add_attrs("f_back", "f_code", "f_builtins", "f_globals", "f_trace", "f_locals")
+        # Some badly-behaved code replaces the f_locals dict with
+        # something that doesn't support the full dict interface. So we
+        # only continue with the annotation if f_locals is a Python dict.
+ if type(f_locals) is dict: + for name, local in obj.f_locals.items(): + add_reference(f"local {name}", local) + + def add_getset_descriptor_references(): + add_attrs("__objclass__", "__name__", "__doc__") + + type_based_references = { + tuple: add_sequence_references, + list: add_sequence_references, + dict: add_dict_references, + set: add_set_references, + frozenset: add_set_references, + types.FunctionType: add_function_references, + types.FrameType: add_frame_references, + CellType: add_cell_references, + types.MethodType: add_bound_method_references, + weakref.ref: add_weakref_references, + types.GetSetDescriptorType: add_getset_descriptor_references, + } + + for type_ in type(obj).__mro__: + if type_ in type_based_references: + type_based_references[type_]() + + add_attrs("__dict__", "__class__") + if isinstance(obj, type): + add_attrs("__mro__") + + return references + +############################################################################### +# Object annotations. + + +BASE_TYPES = (int, float, complex, type(None), str, bytes) +FRAME_FILENAME_LIMIT = 32 + +def object_annotation(obj): + """ + Return a string to be used for Graphviz nodes. + + The string should be short but as informative as possible. + """ + + def format_sequence(obj): + body = ','.join(repr(x) if isinstance(x, BASE_TYPES) else type(x).__name__ for i, x in zip(range(8), obj)) + if len(obj) > 8: + body = f'{body}, ...{len(obj) - 8}' + return body + + # For basic types, use the repr. + if isinstance(obj, BASE_TYPES): + return repr(obj) + if type(obj).__name__ == 'function': + return f"function\n{obj.__name__}" + elif isinstance(obj, types.MethodType): + try: + func_name = obj.__func__.__qualname__ + except AttributeError: + func_name = "" + return f"instancemethod\n{func_name}" + elif isinstance(obj, list): + return f"[{format_sequence(obj)}]" + elif isinstance(obj, tuple): + return f"({format_sequence(obj)})" + elif isinstance(obj, dict): + return f"dict[{len(obj)}]" + elif isinstance(obj, types.ModuleType): + return f"module\n{obj.__name__}" + elif isinstance(obj, type): + return f"type\n{obj.__name__}" + elif isinstance(obj, weakref.ref): + referent = obj() + if referent is None: + return "weakref (dead referent)" + else: + return f"weakref to id 0x{id(referent):x}" + elif isinstance(obj, types.FrameType): + filename = obj.f_code.co_filename + if len(filename) > FRAME_FILENAME_LIMIT: + filename = "..." 
+ filename[-(FRAME_FILENAME_LIMIT - 3):] + return f"frame\n{filename}:{obj.f_lineno}" + else: + return f"object\n{type(obj).__module__}.{type(obj).__name__}" + + + +class Node(NamedTuple): + label: str + context: Optional[str] + root: bool + referrents: List[Tuple[str, int]] + +def create_graph(objects, *, context=None, filter=None): + if context is None: + context = cuda_allocation_context() + if filter is None: + filter = is_cuda_tensor + + nodes = [Node(object_annotation(obj), context(obj), filter(obj), []) for obj in objects] + node_referrers: List[List[int]] = [[] for obj in objects] + + id_to_node = {id(obj): i for i, obj in enumerate(objects)} + for obj in objects: + fidx = id_to_node[id(obj)] + f = nodes[fidx] + references = annotated_references(obj) + for referrent in gc.get_referents(obj): + rid = id(referrent) + tidx = id_to_node.get(rid, None) + if tidx is None: + continue + t = nodes[tidx] + labels = references.get(rid, ["?"]) + node_referrers[tidx].append(fidx) + for label in labels: + f.referrents.append((label, tidx)) + + to_search = [i for i, n in enumerate(nodes) if n.root] + to_keep = set() + while to_search: + idx = to_search.pop() + if idx in to_keep: + continue + to_keep.add(idx) + referrers = node_referrers[idx] + to_search.extend(referrers) + id_to_filtered_id: Dict[int, int] = {} + filtered: List[Any] = [] + for i, n in enumerate(nodes): + if i in to_keep: + id_to_filtered_id[i] = len(id_to_filtered_id) + filtered.append(n) + for n in filtered: + n.referrents[:] = [(label, id_to_filtered_id[idx]) + for (label, idx) in n.referrents + if idx in id_to_filtered_id] + return filtered + +def escape(n): + return json.dumps(n) + + +def is_cuda_tensor(obj): + return isinstance(obj, torch.Tensor) and obj.is_cuda and not isinstance(obj, torch._subclasses.FakeTensor) + +def cuda_allocation_context(): + snapshot = torch.cuda.memory._snapshot() + addr_to_frame = {} + for seg in snapshot['segments']: + addr = seg['address'] + for blk in seg['blocks']: + if blk['state'] == 'active_allocated': + frames, real_size = _block_extra(blk) + addr_to_frame[addr] = frames + addr += blk['size'] + + def object_context(obj): + if is_cuda_tensor(obj): + addr = obj.untyped_storage().data_ptr() + frames = addr_to_frame.get(addr) + if frames is not None: + return '\n'.join(_frames_fmt(frames, full_filename=True)) + return None + return object_context + +def to_dot(nodes): + lines = ["digraph GraphName {", "node [shape=rect];", 'rankdir=LR;'] + for i, n in enumerate(nodes): + lines.append(f'{i} [label={escape(n.label)}, color={ "red" if n.root else "black"}];') + + for i, f in enumerate(nodes): + for label, j in f.referrents: + lines.append(f'{i} -> {j} [label = {escape(label)}]') + lines.append("}\n") + return '\n'.join(lines) + +_template = """ + + + + + + +
+
+
+
+
+Mouse over tensor objects to see where they were allocated.
+
+
+
+
+
+
+"""
+_listener_template = """
+document.getElementById('node{id}').addEventListener('mouseover', function(event) {{
+  document.getElementById("stacktrace").textContent = {stack}
+}})
+"""
+def to_html(nodes):
+    listeners = []
+    for i, n in enumerate(nodes):
+        if n.context is None:
+            continue
+        s = _listener_template.format(id=str(i + 1), stack=escape(f'{n.label}:\n{n.context}'))
+        listeners.append(s)
+    dot = to_dot(nodes)
+    return _template.replace('$DOT', repr(dot)).replace('$LISTENERS', '\n'.join(listeners))
+
+def observe_tensor_cycles(callback):
+    torch.cuda.memory._record_memory_history(max_entries=100000)
+
+    def observer(garbage):
+        if garbage:
+            if not any(is_cuda_tensor(obj) for obj in garbage):
+                logger.info("No CUDA Tensors found in garbage")
+                return
+            callback(to_html(create_graph(garbage)))
+    return observe_garbage(observer)
+
+
+def warn_tensor_cycles():
+    """
+    Install a warning that reports whenever a cycle that is holding CUDA memory is observed.
+
+    The warning produces an .html file that visualizes the cycle,
+    and links it to the stack frame that allocated the CUDA tensor.
+
+    Reference cycles are freed by the cycle collector rather than being cleaned up
+    when the objects in the cycle first become unreachable. If a cycle points to a tensor,
+    the CUDA memory for that tensor will not be freed until garbage collection runs.
+    Accumulation of CUDA allocations can lead to out of memory errors (OOMs), as well as
+    non-deterministic allocation behavior which is harder to debug.
+    """
+    logger.info("Watching Python reference cycles for CUDA Tensors.")
+
+    def write_and_log(html):
+        with NamedTemporaryFile('w', suffix='.html', delete=False) as f:
+            f.write(html)
+            logger.warning('Reference cycle includes a CUDA Tensor; see visualization of cycle at %s', f.name)
+    return observe_tensor_cycles(write_and_log)
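Usage sketch (not part of the diff above): a minimal, hypothetical example of how the cycle-visualization hook added in torch/utils/viz/_cycles.py might be exercised. It assumes a CUDA-capable PyTorch build; the Leaky class, the tensor size, and the explicit gc.collect() call are illustrative only and do not appear in the source.

import gc
import logging

import torch
from torch.utils.viz._cycles import warn_tensor_cycles

logging.basicConfig(level=logging.INFO)  # make the module's logger.info/warning output visible

warn_tensor_cycles()  # starts CUDA memory-history recording and installs the gc observer

class Leaky:
    def __init__(self):
        self.tensor = torch.ones(1024, device="cuda")  # CUDA allocation kept alive by the cycle
        self.me = self  # reference cycle: only the cycle collector can reclaim this object

Leaky()             # the instance becomes unreachable immediately, but only through the cycle
gc.collect()        # the installed callback inspects this collection
print("collected")  # a couple of ordinary function calls give the profiling hook a chance to run;
print("done")       # a warning naming a temporary .html visualization should then be logged

The hook reports only cycles that hold CUDA tensors; purely CPU-side cycles are ignored, which is why the sketch allocates the tensor on the "cuda" device.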