applied-ai-018 committed
Commit ca435d7 · verified · 1 Parent(s): a761cc1

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/20.attention.dense.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/utils/data/__init__.py +76 -0
  27. venv/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py +5 -0
  28. venv/lib/python3.10/site-packages/torch/utils/data/dataloader.py +1479 -0
  29. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py +3 -0
  30. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py +184 -0
  31. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py +248 -0
  32. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py +430 -0
  33. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py +11 -0
  34. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py +125 -0
  40. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py +433 -0
  41. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py +131 -0
  42. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py +18 -0
  43. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py +404 -0
  44. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi +689 -0
  45. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py +246 -0
  46. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py +237 -0
  47. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py +71 -0
  48. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py +96 -0
  49. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py +51 -0
  50. venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py +17 -0
ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e0cc75fbea31f6affdffe947d5642a916d442590c4c918531411264e555d1be
+ size 16778396
ckpts/universal/global_step120/zero/20.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e754b97c41c0c16a0c8cc0c6304f726937cab0b397ef7d66be8e31c9a0e16ad0
+ size 16778317
ckpts/universal/global_step120/zero/5.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:483990bec3315475e0bfeea968847d56821d682df456cb937136d24c3f5334be
+ size 33555627
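The three checkpoint entries above are Git LFS pointer files: only the `oid` (a SHA-256 of the blob) and `size` are committed, while the tensors themselves live in LFS storage. As a minimal sketch (not part of the commit; the helper name is made up), a downloaded blob can be checked against its pointer like this:

    import hashlib

    def verify_lfs_blob(path, expected_oid, expected_size):
        # Hash the file in 1 MiB chunks and compare against the pointer's oid/size.
        h = hashlib.sha256()
        size = 0
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
                size += len(chunk)
        return h.hexdigest() == expected_oid and size == expected_size

    # e.g., for the first pointer above:
    # verify_lfs_blob(
    #     "ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg.pt",
    #     "0e0cc75fbea31f6affdffe947d5642a916d442590c4c918531411264e555d1be",
    #     16778396,
    # )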
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc ADDED
Binary file (11.7 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc ADDED
Binary file (5.34 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc ADDED
Binary file (1.85 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc ADDED
Binary file (3.89 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc ADDED
Binary file (31.5 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc ADDED
Binary file (3.2 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc ADDED
Binary file (2.1 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc ADDED
Binary file (8.24 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc ADDED
Binary file (1.21 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc ADDED
Binary file (604 Bytes)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc ADDED
Binary file (844 Bytes)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc ADDED
Binary file (2.89 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc ADDED
Binary file (14.4 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc ADDED
Binary file (685 Bytes)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc ADDED
Binary file (71.6 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc ADDED
Binary file (1.01 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc ADDED
Binary file (1.88 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (8.74 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc ADDED
Binary file (7.03 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc ADDED
Binary file (4.98 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc ADDED
Binary file (6.94 kB)
venv/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc ADDED
Binary file (9.13 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__init__.py ADDED
@@ -0,0 +1,76 @@
+ # TODO(VitalyFedyunin): Rearranging these imports leads to a crash;
+ # need to clean up dependencies and fix it
+ from torch.utils.data.sampler import (
+     BatchSampler,
+     RandomSampler,
+     Sampler,
+     SequentialSampler,
+     SubsetRandomSampler,
+     WeightedRandomSampler,
+ )
+ from torch.utils.data.dataset import (
+     ChainDataset,
+     ConcatDataset,
+     Dataset,
+     IterableDataset,
+     StackDataset,
+     Subset,
+     TensorDataset,
+     random_split,
+ )
+ from torch.utils.data.datapipes.datapipe import (
+     DFIterDataPipe,
+     DataChunk,
+     IterDataPipe,
+     MapDataPipe,
+ )
+ from torch.utils.data.dataloader import (
+     DataLoader,
+     _DatasetKind,
+     get_worker_info,
+     default_collate,
+     default_convert,
+ )
+ from torch.utils.data.distributed import DistributedSampler
+ from torch.utils.data.datapipes._decorator import (
+     argument_validation,
+     functional_datapipe,
+     guaranteed_datapipes_determinism,
+     non_deterministic,
+     runtime_validation,
+     runtime_validation_disabled,
+ )
+
+ __all__ = ['BatchSampler',
+            'ChainDataset',
+            'ConcatDataset',
+            'DFIterDataPipe',
+            'DataChunk',
+            'DataLoader',
+            'Dataset',
+            'DistributedSampler',
+            'IterDataPipe',
+            'IterableDataset',
+            'MapDataPipe',
+            'RandomSampler',
+            'Sampler',
+            'SequentialSampler',
+            'StackDataset',
+            'Subset',
+            'SubsetRandomSampler',
+            'TensorDataset',
+            'WeightedRandomSampler',
+            '_DatasetKind',
+            'argument_validation',
+            'default_collate',
+            'default_convert',
+            'functional_datapipe',
+            'get_worker_info',
+            'guaranteed_datapipes_determinism',
+            'non_deterministic',
+            'random_split',
+            'runtime_validation',
+            'runtime_validation_disabled']
+
+ # Please keep this list sorted
+ assert __all__ == sorted(__all__)
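Since this `__init__.py` only re-exports the package's public surface, a quick smoke test of the exported names might look like the following (a sketch assuming the venv above is active; not part of the commit):

    import torch
    from torch.utils.data import DataLoader, TensorDataset, RandomSampler

    # Ten 3-feature samples with integer labels.
    dataset = TensorDataset(torch.randn(10, 3), torch.arange(10))

    # An explicit RandomSampler plus batch_size=4 exercises the default
    # BatchSampler path re-exported above.
    loader = DataLoader(dataset, batch_size=4, sampler=RandomSampler(dataset))
    for x, y in loader:
        print(x.shape, y.shape)  # torch.Size([4, 3]) torch.Size([4]); last batch has 2 samples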
venv/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py ADDED
@@ -0,0 +1,5 @@
+ import warnings
+
+ def worker_init_fn(worker_id):
+     warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated"
+                   " as DataLoader automatically applies sharding in every worker")
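For illustration only (not part of the commit): passing this legacy shim to a `DataLoader` now does nothing except emit the deprecation warning in each worker, since sharding is applied automatically.

    from torch.utils.data import DataLoader
    from torch.utils.data.backward_compatibility import worker_init_fn

    if __name__ == "__main__":  # guard needed when workers are spawned
        # range() is a valid map-style dataset: it has __getitem__ and __len__.
        loader = DataLoader(range(8), num_workers=2, worker_init_fn=worker_init_fn)
        print(list(loader))  # each worker emits the deprecation warning above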
venv/lib/python3.10/site-packages/torch/utils/data/dataloader.py ADDED
@@ -0,0 +1,1479 @@
+ r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter.
+
+ To support these two classes, in `./_utils` we define many utility methods and
+ functions to be run in multiprocessing. E.g., the data loading worker loop is
+ in `./_utils/worker.py`.
+ """
+
+ import functools
+ import itertools
+ import logging
+ import os
+ import queue
+ import threading
+ import warnings
+
+ from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union
+
+ import multiprocessing as python_multiprocessing
+ import torch
+ import torch.distributed as dist
+ import torch.multiprocessing as multiprocessing
+ import torch.utils.data.graph_settings
+
+ from torch._utils import ExceptionWrapper
+
+ from . import (
+     IterDataPipe,
+     MapDataPipe,
+     IterableDataset,
+     Sampler,
+     SequentialSampler,
+     RandomSampler,
+     BatchSampler,
+     Dataset,)
+
+ from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
+
+ from . import _utils
+
+ __all__ = [
+     "DataLoader",
+     "get_worker_info",
+     "default_collate",
+     "default_convert",
+ ]
+
+ T_co = TypeVar('T_co', covariant=True)
+ T = TypeVar('T')
+ _worker_init_fn_t = Callable[[int], None]
+
+ # Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
+ # type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
+ # See https://github.com/python/mypy/issues/3737.
+ _collate_fn_t = Callable[[List[T]], Any]
+
+
+ # These functions used to be defined in this file. However, they were moved to
+ # _utils/collate.py. Although it is rather hard to access this from user land
+ # (one has to explicitly directly `import torch.utils.data.dataloader`), there
+ # probably is user code out there using it. This aliasing maintains BC in this
+ # aspect.
+ default_collate: _collate_fn_t = _utils.collate.default_collate
+ default_convert = _utils.collate.default_convert
+
+ get_worker_info = _utils.worker.get_worker_info
+
+ logger = logging.getLogger(__name__)
+
+
+ class _DatasetKind:
+     Map = 0
+     Iterable = 1
+
+     @staticmethod
+     def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
+         if kind == _DatasetKind.Map:
+             return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
+         else:
+             return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
+
+
+ class _InfiniteConstantSampler(Sampler):
+     r"""Analogous to ``itertools.repeat(None, None)``.
+
+     Used as sampler for :class:`~torch.utils.data.IterableDataset`.
+     """
+
+     def __iter__(self):
+         while True:
+             yield None
+
+
+ def _get_distributed_settings():
+     if dist.is_available() and dist.is_initialized():
+         return dist.get_world_size(), dist.get_rank()
+     else:
+         return 1, 0
+
+
+ def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
+     global_worker_id = worker_id
+     info = torch.utils.data.get_worker_info()
+     assert info is not None
+     total_workers = info.num_workers
+     datapipe = info.dataset
+     assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
+     # To distribute elements across distributed processes evenly, we should shard data on distributed
+     # processes first and then shard on worker processes
+     total_workers *= world_size
+     global_worker_id = global_worker_id * world_size + rank_id
+     # For BC, use default SHARDING_PRIORITIES
+     torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
+     if worker_init_fn is not None:
+         worker_init_fn(worker_id)
+
+
+ def _share_dist_seed(generator, pg):
+     _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator)
+     if isinstance(pg, dist.ProcessGroup):
+         dist.broadcast(_shared_seed, src=0, group=pg)
+     return _shared_seed.item()
+
+
+ class DataLoader(Generic[T_co]):
+     r"""
+     Data loader combines a dataset and a sampler, and provides an iterable over the given dataset.
+
+     The :class:`~torch.utils.data.DataLoader` supports both map-style and
+     iterable-style datasets with single- or multi-process loading, customizing
+     loading order and optional automatic batching (collation) and memory pinning.
+
+     See :py:mod:`torch.utils.data` documentation page for more details.
+
+     Args:
+         dataset (Dataset): dataset from which to load the data.
+         batch_size (int, optional): how many samples per batch to load
+             (default: ``1``).
+         shuffle (bool, optional): set to ``True`` to have the data reshuffled
+             at every epoch (default: ``False``).
+         sampler (Sampler or Iterable, optional): defines the strategy to draw
+             samples from the dataset. Can be any ``Iterable`` with ``__len__``
+             implemented. If specified, :attr:`shuffle` must not be specified.
+         batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
+             returns a batch of indices at a time. Mutually exclusive with
+             :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
+             and :attr:`drop_last`.
+         num_workers (int, optional): how many subprocesses to use for data
+             loading. ``0`` means that the data will be loaded in the main process.
+             (default: ``0``)
+         collate_fn (Callable, optional): merges a list of samples to form a
+             mini-batch of Tensor(s). Used when using batched loading from a
+             map-style dataset.
+         pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
+             into device/CUDA pinned memory before returning them. If your data elements
+             are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
+             see the example below.
+         drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
+             if the dataset size is not divisible by the batch size. If ``False`` and
+             the size of dataset is not divisible by the batch size, then the last batch
+             will be smaller. (default: ``False``)
+         timeout (numeric, optional): if positive, the timeout value for collecting a batch
+             from workers. Should always be non-negative. (default: ``0``)
+         worker_init_fn (Callable, optional): If not ``None``, this will be called on each
+             worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
+             input, after seeding and before data loading. (default: ``None``)
+         multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
+             ``None``, the default `multiprocessing context`_ of your operating system will
+             be used. (default: ``None``)
+         generator (torch.Generator, optional): If not ``None``, this RNG will be used
+             by RandomSampler to generate random indexes and multiprocessing to generate
+             ``base_seed`` for workers. (default: ``None``)
+         prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
+             in advance by each worker. ``2`` means there will be a total of
+             2 * num_workers batches prefetched across all workers. (The default value depends
+             on the value set for num_workers: if ``num_workers=0``, the default is ``None``;
+             otherwise, if ``num_workers > 0``, the default is ``2``.)
+         persistent_workers (bool, optional): If ``True``, the data loader will not shut down
+             the worker processes after a dataset has been consumed once. This allows
+             the worker `Dataset` instances to stay alive. (default: ``False``)
+         pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
+             ``True``.
+
+
+     .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
+                  cannot be an unpicklable object, e.g., a lambda function. See
+                  :ref:`multiprocessing-best-practices` for more details related
+                  to multiprocessing in PyTorch.
+
+     .. warning:: The ``len(dataloader)`` heuristic is based on the length of the sampler used.
+                  When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
+                  it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
+                  rounding depending on :attr:`drop_last`, regardless of multi-process loading
+                  configurations. This represents the best guess PyTorch can make because PyTorch
+                  trusts user :attr:`dataset` code to correctly handle multi-process
+                  loading to avoid duplicate data.
+
+                  However, if sharding results in multiple workers having incomplete last batches,
+                  this estimate can still be inaccurate, because (1) an otherwise complete batch can
+                  be broken into multiple ones and (2) more than one batch worth of samples can be
+                  dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
+                  cases in general.
+
+                  See `Dataset Types`_ for more details on these two types of datasets and how
+                  :class:`~torch.utils.data.IterableDataset` interacts with
+                  `Multi-process data loading`_.
+
+     .. warning:: See the :ref:`reproducibility`, :ref:`dataloader-workers-random-seed`, and
+                  :ref:`data-loading-randomness` notes for random seed related questions.
+
+     .. _multiprocessing context:
+         https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
+     """
+
+     dataset: Dataset[T_co]
+     batch_size: Optional[int]
+     num_workers: int
+     pin_memory: bool
+     drop_last: bool
+     timeout: float
+     sampler: Union[Sampler, Iterable]
+     pin_memory_device: str
+     prefetch_factor: Optional[int]
+     _iterator : Optional['_BaseDataLoaderIter']
+     __initialized = False
+
+     def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
+                  shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None,
+                  batch_sampler: Union[Sampler[List], Iterable[List], None] = None,
+                  num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None,
+                  pin_memory: bool = False, drop_last: bool = False,
+                  timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None,
+                  multiprocessing_context=None, generator=None,
+                  *, prefetch_factor: Optional[int] = None,
+                  persistent_workers: bool = False,
+                  pin_memory_device: str = ""):
+         torch._C._log_api_usage_once("python.data_loader")
+
+         if num_workers < 0:
+             raise ValueError('num_workers option should be non-negative; '
+                              'use num_workers=0 to disable multiprocessing.')
+
+         if timeout < 0:
+             raise ValueError('timeout option should be non-negative')
+
+         if num_workers == 0 and prefetch_factor is not None:
+             raise ValueError('prefetch_factor option could only be specified in multiprocessing. '
+                              'Let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.')
+         elif num_workers > 0 and prefetch_factor is None:
+             prefetch_factor = 2
+         elif prefetch_factor is not None and prefetch_factor < 0:
+             raise ValueError('prefetch_factor option should be non-negative')
+
+         if persistent_workers and num_workers == 0:
+             raise ValueError('persistent_workers option needs num_workers > 0')
+
+         self.dataset = dataset
+         self.num_workers = num_workers
+         self.prefetch_factor = prefetch_factor
+         self.pin_memory = pin_memory
+         self.pin_memory_device = pin_memory_device
+         self.timeout = timeout
+         self.worker_init_fn = worker_init_fn
+         self.multiprocessing_context = multiprocessing_context
+
+         # Adds forward compatibilities so classic DataLoader can work with DataPipes:
+         #   _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
+         if isinstance(self.dataset, IterDataPipe):
+             self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
+         elif isinstance(self.dataset, MapDataPipe):
+             self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
+
+         # Arg-check dataset related before checking samplers because we want to
+         # tell users that iterable-style datasets are incompatible with custom
+         # samplers first, so that they don't learn that this combo doesn't work
+         # after spending time fixing the custom sampler errors.
+         if isinstance(dataset, IterableDataset):
+             self._dataset_kind = _DatasetKind.Iterable
+             # NOTE [ Custom Samplers and IterableDataset ]
+             #
+             # `IterableDataset` does not support custom `batch_sampler` or
+             # `sampler` since the key is irrelevant (unless we support
+             # generator-style dataset one day...).
+             #
+             # For `sampler`, we always create a dummy sampler. This is an
+             # infinite sampler even when the dataset may have an implemented
+             # finite `__len__` because in multi-process data loading, naive
+             # settings will return duplicated data (which may be desired), and
+             # thus using a sampler with length matching that of dataset will
+             # cause data loss (you may have duplicates of the first couple
+             # batches, but never see anything afterwards). Therefore,
+             # `IterableDataset` always uses an infinite sampler, an instance of
+             # `_InfiniteConstantSampler` defined above.
+             #
+             # A custom `batch_sampler` essentially only controls the batch size.
+             # However, it is unclear how useful it would be since an iterable-style
+             # dataset can handle that within itself. Moreover, it is pointless
+             # in multi-process data loading as the assignment order of batches
+             # to workers is an implementation detail so users can not control
+             # how to batchify each worker's iterable. Thus, we disable this
+             # option. If this turns out to be useful in future, we can re-enable
+             # this, and support custom samplers that specify the assignments to
+             # specific workers.
+             if isinstance(dataset, IterDataPipe):
+                 if shuffle is not None:
+                     dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
+             # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
+             elif shuffle not in {False, None}:
+                 raise ValueError(
+                     f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}")
+
+             if sampler is not None:
+                 # See NOTE [ Custom Samplers and IterableDataset ]
+                 raise ValueError(
+                     f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}")
+             elif batch_sampler is not None:
+                 # See NOTE [ Custom Samplers and IterableDataset ]
+                 raise ValueError(
+                     "DataLoader with IterableDataset: expected unspecified "
+                     f"batch_sampler option, but got batch_sampler={batch_sampler}")
+         else:
+             shuffle = bool(shuffle)
+             self._dataset_kind = _DatasetKind.Map
+
+         if sampler is not None and shuffle:
+             raise ValueError('sampler option is mutually exclusive with '
+                              'shuffle')
+
+         if batch_sampler is not None:
+             # auto_collation with custom batch_sampler
+             if batch_size != 1 or shuffle or sampler is not None or drop_last:
+                 raise ValueError('batch_sampler option is mutually exclusive '
+                                  'with batch_size, shuffle, sampler, and '
+                                  'drop_last')
+             batch_size = None
+             drop_last = False
+         elif batch_size is None:
+             # no auto_collation
+             if drop_last:
+                 raise ValueError('batch_size=None option disables auto-batching '
+                                  'and is mutually exclusive with drop_last')
+
+         if sampler is None:  # give default samplers
+             if self._dataset_kind == _DatasetKind.Iterable:
+                 # See NOTE [ Custom Samplers and IterableDataset ]
+                 sampler = _InfiniteConstantSampler()
+             else:  # map-style
+                 if shuffle:
+                     sampler = RandomSampler(dataset, generator=generator)  # type: ignore[arg-type]
+                 else:
+                     sampler = SequentialSampler(dataset)  # type: ignore[arg-type]
+
+         if batch_size is not None and batch_sampler is None:
+             # auto_collation without custom batch_sampler
+             batch_sampler = BatchSampler(sampler, batch_size, drop_last)
+
+         self.batch_size = batch_size
+         self.drop_last = drop_last
+         self.sampler = sampler
+         self.batch_sampler = batch_sampler
+         self.generator = generator
+
+         if collate_fn is None:
+             if self._auto_collation:
+                 collate_fn = _utils.collate.default_collate
+             else:
+                 collate_fn = _utils.collate.default_convert
+
+         self.collate_fn = collate_fn
+         self.persistent_workers = persistent_workers
+
+         self.__initialized = True
+         self._IterableDataset_len_called = None  # See NOTE [ IterableDataset and __len__ ]
+
+         self._iterator = None
+
+         self.check_worker_number_rationality()
+
+         torch.set_vital('Dataloader', 'enabled', 'True')  # type: ignore[attr-defined]
+
+     def _get_iterator(self) -> '_BaseDataLoaderIter':
+         if self.num_workers == 0:
+             return _SingleProcessDataLoaderIter(self)
+         else:
+             self.check_worker_number_rationality()
+             return _MultiProcessingDataLoaderIter(self)
+
+     @property
+     def multiprocessing_context(self):
+         return self.__multiprocessing_context
+
+     @multiprocessing_context.setter
+     def multiprocessing_context(self, multiprocessing_context):
+         if multiprocessing_context is not None:
+             if self.num_workers > 0:
+                 if isinstance(multiprocessing_context, str):
+                     valid_start_methods = multiprocessing.get_all_start_methods()
+                     if multiprocessing_context not in valid_start_methods:
+                         raise ValueError(
+                             'multiprocessing_context option '
+                             f'should specify a valid start method in {valid_start_methods!r}, but got '
+                             f'multiprocessing_context={multiprocessing_context!r}')
+                     multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
+
+                 if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):
+                     raise TypeError('multiprocessing_context option should be a valid context '
+                                     'object or a string specifying the start method, but got '
+                                     f'multiprocessing_context={multiprocessing_context}')
+             else:
+                 raise ValueError('multiprocessing_context can only be used with '
+                                  'multi-process loading (num_workers > 0), but got '
+                                  f'num_workers={self.num_workers}')
+
+         self.__multiprocessing_context = multiprocessing_context
+
+     def __setattr__(self, attr, val):
+         if self.__initialized and attr in (
+                 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):
+             raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized')
+
+         super().__setattr__(attr, val)
+
+     # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
+     # since '_BaseDataLoaderIter' references 'DataLoader'.
+     def __iter__(self) -> '_BaseDataLoaderIter':
+         # When using a single worker the returned iterator should be
+         # created every time to avoid resetting its state.
+         # However, in the case of a multiple-workers iterator
+         # the iterator is only created once in the lifetime of the
+         # DataLoader object so that workers can be reused.
+         if self.persistent_workers and self.num_workers > 0:
+             if self._iterator is None:
+                 self._iterator = self._get_iterator()
+             else:
+                 self._iterator._reset(self)
+             return self._iterator
+         else:
+             return self._get_iterator()
+
+     @property
+     def _auto_collation(self):
+         return self.batch_sampler is not None
+
+     @property
+     def _index_sampler(self):
+         # The actual sampler used for generating indices for `_DatasetFetcher`
+         # (see _utils/fetch.py) to read data at each time. This would be
+         # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.
+         # We can't change `.sampler` and `.batch_sampler` attributes for BC
+         # reasons.
+         if self._auto_collation:
+             return self.batch_sampler
+         else:
+             return self.sampler
+
+     def __len__(self) -> int:
+         if self._dataset_kind == _DatasetKind.Iterable:
+             # NOTE [ IterableDataset and __len__ ]
+             #
+             # For `IterableDataset`, `__len__` could be inaccurate when one naively
+             # does multi-processing data loading, since the samples will be duplicated.
+             # However, no real use case should be actually using that behavior, so
+             # it should count as a user error. We should generally trust user
+             # code to do the proper thing (e.g., configure each replica differently
+             # in `__iter__`), and give us the correct `__len__` if they choose to
+             # implement it (this will still throw if the dataset does not implement
+             # a `__len__`).
+             #
+             # To provide a further warning, we track if `__len__` was called on the
+             # `DataLoader`, save the returned value in `self._len_called`, and warn
+             # if the iterator ends up yielding more than this number of samples.
+
+             # Cannot statically verify that dataset is Sized
+             length = self._IterableDataset_len_called = len(self.dataset)  # type: ignore[assignment, arg-type]
+             if self.batch_size is not None:  # IterableDataset doesn't allow custom sampler or batch_sampler
+                 from math import ceil
+                 if self.drop_last:
+                     length = length // self.batch_size
+                 else:
+                     length = ceil(length / self.batch_size)
+             return length
+         else:
+             return len(self._index_sampler)
+
+     def check_worker_number_rationality(self):
+         # This function checks whether the dataloader's worker number is rational based on
+         # the current system's resources. The current rule is that if the number of workers this
+         # Dataloader will create is bigger than the number of logical cpus it is allowed to
+         # use, then we will pop up a warning to let the user pay attention.
+         #
+         # e.g. If the current system has 2 physical CPUs with 16 cores each, and each core supports 2
+         #      threads, then the total logical cpus here is 2 * 16 * 2 = 64. Let's say the current
+         #      DataLoader process can use half of them, which is 32; then the rational max number of
+         #      workers that can be initiated from this process is 32.
+         #      Now, let's say the created DataLoader has num_workers = 40, which is bigger than 32.
+         #      So the warning message is triggered to notify the user to lower the worker number if
+         #      necessary.
+         #
+         #
+         # [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is
+         #        available (available on most Linux systems, but not OSX and Windows).
+         #        When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
+         #        it doesn't respect cpuset.
+         #        We don't take threading into account since each worker process is single threaded
+         #        at this time.
+         #
+         #        We don't set any threading flags (e.g. OMP_NUM_THREADS, MKL_NUM_THREADS, etc)
+         #        other than `torch.set_num_threads` to 1 in the worker process. If the passed-in
+         #        functions use 3rd party modules that rely on those threading flags to determine
+         #        how many threads to create (e.g. numpy, etc), then it is the caller's responsibility to
+         #        set those flags correctly.
+         def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
+
+             suggested_max_worker_msg = ((
+                 "Our suggested max number of worker in current system is {}{}, which is smaller "
+                 "than what this DataLoader is going to create.").format(
+                     num_worker_suggest,
+                     ("" if cpuset_checked else " (`cpuset` is not taken into account)"))
+             ) if num_worker_suggest is not None else (
+                 "DataLoader is not able to compute a suggested max number of worker in current system.")
+
+             warn_msg = (
+                 "This DataLoader will create {} worker processes in total. {} "
+                 "Please be aware that excessive worker creation might get DataLoader running slow or even freeze, "
+                 "lower the worker number to avoid potential slowness/freeze if necessary.").format(
+                     num_worker_created,
+                     suggested_max_worker_msg)
+             return warn_msg
+
+         if not self.num_workers or self.num_workers == 0:
+             return
+
+         # try to compute a suggested max number of workers based on the system's resources
+         max_num_worker_suggest = None
+         cpuset_checked = False
+         if hasattr(os, 'sched_getaffinity'):
+             try:
+                 max_num_worker_suggest = len(os.sched_getaffinity(0))
+                 cpuset_checked = True
+             except Exception:
+                 pass
+         if max_num_worker_suggest is None:
+             # os.cpu_count() could return Optional[int]
+             # get cpu count first and check None in order to satisfy mypy check
+             cpu_count = os.cpu_count()
+             if cpu_count is not None:
+                 max_num_worker_suggest = cpu_count
+
+         if max_num_worker_suggest is None:
+             warnings.warn(_create_warning_msg(
+                 max_num_worker_suggest,
+                 self.num_workers,
+                 cpuset_checked))
+             return
+
+         if self.num_workers > max_num_worker_suggest:
+             warnings.warn(_create_warning_msg(
+                 max_num_worker_suggest,
+                 self.num_workers,
+                 cpuset_checked))
+
+
+ class _BaseDataLoaderIter:
+     def __init__(self, loader: DataLoader) -> None:
+         self._dataset = loader.dataset
+         self._shared_seed = None
+         self._pg = None
+         if isinstance(self._dataset, IterDataPipe):
+             if dist.is_available() and dist.is_initialized():
+                 self._pg = dist.new_group(backend="gloo")
+             self._shared_seed = _share_dist_seed(loader.generator, self._pg)
+             shared_rng = torch.Generator()
+             shared_rng.manual_seed(self._shared_seed)
+             self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
+         self._dataset_kind = loader._dataset_kind
+         self._IterableDataset_len_called = loader._IterableDataset_len_called
+         self._auto_collation = loader._auto_collation
+         self._drop_last = loader.drop_last
+         self._index_sampler = loader._index_sampler
+         self._num_workers = loader.num_workers
+         ws, rank = _get_distributed_settings()
+         self._world_size = ws
+         self._rank = rank
+         # For other backends, pin_memory_device needs to be set; if not set,
+         # the default behaviour is the CUDA device. If pin_memory_device is selected
+         # and pin_memory is not set, the default behaviour is false.
+         if (len(loader.pin_memory_device) == 0):
+             self._pin_memory = loader.pin_memory and torch.cuda.is_available()
+             self._pin_memory_device = None
+         else:
+             if not loader.pin_memory:
+                 warn_msg = ("pin memory device is set and pin_memory flag is not used, so device pinned memory won't be used; "
+                             "please set pin_memory to true if you need to use the device pin memory")
+                 warnings.warn(warn_msg)
+
+             self._pin_memory = loader.pin_memory
+             self._pin_memory_device = loader.pin_memory_device
+         self._timeout = loader.timeout
+         self._collate_fn = loader.collate_fn
+         self._sampler_iter = iter(self._index_sampler)
+         self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
+         self._persistent_workers = loader.persistent_workers
+         self._num_yielded = 0
+         self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__"
+
+     def __iter__(self) -> '_BaseDataLoaderIter':
+         return self
+
+     def _reset(self, loader, first_iter=False):
+         self._sampler_iter = iter(self._index_sampler)
+         self._num_yielded = 0
+         self._IterableDataset_len_called = loader._IterableDataset_len_called
+         if isinstance(self._dataset, IterDataPipe):
+             self._shared_seed = _share_dist_seed(loader.generator, self._pg)
+             shared_rng = torch.Generator()
+             shared_rng.manual_seed(self._shared_seed)
+             self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
+
+     def _next_index(self):
+         return next(self._sampler_iter)  # may raise StopIteration
+
+     def _next_data(self):
+         raise NotImplementedError
+
+     def __next__(self) -> Any:
+         with torch.autograd.profiler.record_function(self._profile_name):
+             if self._sampler_iter is None:
+                 # TODO(https://github.com/pytorch/pytorch/issues/76750)
+                 self._reset()  # type: ignore[call-arg]
+             data = self._next_data()
+             self._num_yielded += 1
+             if self._dataset_kind == _DatasetKind.Iterable and \
+                     self._IterableDataset_len_called is not None and \
+                     self._num_yielded > self._IterableDataset_len_called:
+                 warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} "
+                             "samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called,
+                                                                   self._num_yielded)
+                 if self._num_workers > 0:
+                     warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the "
+                                  "IterableDataset replica at each worker. Please see "
+                                  "https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.")
+                 warnings.warn(warn_msg)
+             return data
+
+     def __len__(self) -> int:
+         return len(self._index_sampler)
+
+     def __getstate__(self):
+         # TODO: add limited pickling support for sharing an iterator
+         # across multiple threads for HOGWILD.
+         # Probably the best way to do this is by moving the sample pushing
+         # to a separate thread and then just sharing the data queue
+         # but signalling the end is tricky without a non-blocking API
+         raise NotImplementedError(f"{self.__class__.__name__} cannot be pickled")
+
+
+ class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
+     def __init__(self, loader):
+         super().__init__(loader)
+         assert self._timeout == 0
+         assert self._num_workers == 0
+
+         # Adds forward compatibilities so classic DataLoader can work with DataPipes:
+         #   Taking care of distributed sharding
+         if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
+             # For BC, use default SHARDING_PRIORITIES
+             torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank)
+
+         self._dataset_fetcher = _DatasetKind.create_fetcher(
+             self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
+
+     def _next_data(self):
+         index = self._next_index()  # may raise StopIteration
+         data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
+         if self._pin_memory:
+             data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
+         return data
+
+
+ class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
+     r"""Iterates once over the DataLoader's dataset, as specified by the sampler."""
+
+     # NOTE [ Data Loader Multiprocessing Shutdown Logic ]
+     #
+     # Preliminary:
+     #
+     # Our data model looks like this (queues are indicated with curly brackets):
+     #
+     #                main process                              ||
+     #                     |                                    ||
+     #               {index_queue}                              ||
+     #                     |                                    ||
+     #              worker processes                            ||     DATA
+     #                     |                                    ||
+     #            {worker_result_queue}                         ||     FLOW
+     #                     |                                    ||
+     #      pin_memory_thread of main process                   ||   DIRECTION
+     #                     |                                    ||
+     #               {data_queue}                               ||
+     #                     |                                    ||
+     #                data output                               \/
+     #
+     # P.S. The `worker_result_queue` and `pin_memory_thread` part may be omitted if
+     #      `pin_memory=False`.
+     #
+     #
+     # Terminating multiprocessing logic requires very careful design. In
+     # particular, we need to make sure that
+     #
+     #   1. The iterator gracefully exits the workers when its last reference is
+     #      gone or it is depleted.
+     #
+     #      In this case, the workers should be gracefully exited because the
+     #      main process may still need to continue to run, and we want cleaning
+     #      up code in the workers to be executed (e.g., releasing GPU memory).
+     #      Naturally, we implement the shutdown logic in `__del__` of
+     #      DataLoaderIterator.
+     #
+     #      We delay the discussion on the logic in this case until later.
+     #
+     #   2. The iterator exits the workers when the loader process and/or worker
+     #      processes exit normally or with error.
+     #
+     #      We set all workers and `pin_memory_thread` to have `daemon=True`.
+     #
+     #      You may ask, why can't we make the workers non-daemonic, and
+     #      gracefully exit using the same logic as we have in `__del__` when the
+     #      iterator gets deleted (see 1 above)?
+     #
+     #      First of all, `__del__` is **not** guaranteed to be called when the
+     #      interpreter exits. Even if it is called, by the time it executes,
+     #      many Python core library resources may already be freed, and even
+     #      simple things like acquiring an internal lock of a queue may hang.
+     #      Therefore, in this case, we actually need to prevent `__del__` from
+     #      being executed, and rely on the automatic termination of daemonic
+     #      children.
+     #
+     #      Thus, we register an `atexit` hook that sets a global flag
+     #      `_utils.python_exit_status`. Since `atexit` hooks are executed in the
+     #      reverse order of registration, we are guaranteed that this flag is
+     #      set before library resources we use are freed (which, at least in
+     #      CPython, is done via an `atexit` handler defined in
+     #      `multiprocessing/util.py`
+     #      https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
+     #      registered when an object requiring this mechanism is first
+     #      created, e.g., `mp.Queue`
+     #      https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
+     #      https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
+     #      )
+     #
+     #      So in `__del__`, we check if `_utils.python_exit_status` is set or
+     #      `None` (freed), and perform a no-op if so.
+     #
+     #      However, simply letting library clean-up codes run can also be bad,
+     #      because such codes (i.e., `multiprocessing.util._exit_function()`)
+     #      include join putting threads for `mp.Queue`, which can be blocking.
+     #      Hence, the main process putting threads are called with
+     #      `cancel_join_thread` at creation. See later section
+     #      [ 3b. A process won't hang when putting into a queue; ]
+     #      for more details.
+     #
+     #      Here are two example cases where library clean-up codes can run
+     #      before `__del__` is called:
+     #
+     #      1. If we hold onto a reference to the iterator, it more often
+     #         than not tries to do `multiprocessing` library cleaning before
+     #         clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
+     #         and thus prevents our cleaning-up code from running first.
+     #
+     #      2. A similar issue arises when a `DataLoader` is used in a subprocess.
+     #         When a process ends, it shuts all its daemonic children
+     #         down with a SIGTERM (instead of joining them without a timeout).
+     #         Similarly for threads, but by a different mechanism. This fact,
+     #         together with a few implementation details of multiprocessing, forces
+     #         us to make workers daemonic. All of our problems arise when a
+     #         DataLoader is used in a subprocess, and are caused by multiprocessing
+     #         code which looks more or less like this:
+     #
+     #               try:
+     #                   your_function_using_a_dataloader()
+     #               finally:
+     #                   multiprocessing.util._exit_function()
+     #
+     #         The joining/termination mentioned above happens inside
+     #         `_exit_function()`. Now, if `your_function_using_a_dataloader()`
+     #         throws, the stack trace stored in the exception will prevent the
+     #         frame which uses `DataLoaderIter` from being freed. If the frame has any
+     #         reference to the `DataLoaderIter` (e.g., in a method of the iter),
+     #         its `__del__`, which starts the shutdown procedure, will not be
+     #         called. That, in turn, means that workers aren't notified. Attempting
+     #         to join in `_exit_function` will then result in a hang.
+     #
+     #         For context, `_exit_function` is also registered as an `atexit` call.
+     #         So it is unclear to me (@ssnl) why this is needed in a finally block.
+     #         The code dates back to 2008 and there is no comment on the original
+     #         PEP 371 or patch https://bugs.python.org/issue3050 (containing both
+     #         the finally block and the `atexit` registration) that explains this.
+     #
+     #
+     #      Finally, another choice is to just shut down workers with the logic in 1
+     #      above whenever we see an error in `next`. This isn't ideal because
+     #        a. It prevents users from using try-catch to resume data loading.
+     #        b. It doesn't prevent hanging if users have references to the
+     #           iterator.
+     #
+     #   3. All processes exit if any of them die unexpectedly by fatal signals.
+     #
+     #      As shown above, the workers are set as daemonic children of the main
+     #      process. However, automatic cleaning-up of such child processes only
+     #      happens if the parent process exits gracefully (e.g., not via fatal
+     #      signals like SIGKILL). So we must ensure that each process will exit
+     #      even if the process that should send/receive data to/from it were
+     #      killed, i.e.,
+     #
+     #        a. A process won't hang when getting from a queue.
+     #
+     #           Even with carefully designed data dependencies (i.e., a `put()`
+     #           always corresponding to a `get()`), hanging on `get()` can still
+     #           happen when data in the queue is corrupted (e.g., due to
+     #           `cancel_join_thread` or unexpected exit).
+     #
+     #           For child exit, we set a timeout whenever we try to get data
+     #           from `data_queue`, and check the workers' status on each timeout
+     #           and error.
+     #           See `_DataLoaderiter._get_batch()` and
+     #           `_DataLoaderiter._try_get_data()` for details.
+     #
+     #           Additionally, for child exit on non-Windows platforms, we also
+     #           register a SIGCHLD handler (which is supported on Windows) on
+     #           the main process, which checks if any of the workers fail in the
+     #           (Python) handler. This is more efficient and faster in detecting
+     #           worker failures, compared to only using the above mechanism.
+     #           See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
+     #
+     #           For `.get()` calls where the sender(s) is not the workers, we
+     #           guard them with timeouts, and check the status of the sender
+     #           when a timeout happens:
+     #             + in the workers, the `_utils.worker.ManagerWatchdog` class
+     #               checks the status of the main process.
+     #             + if `pin_memory=True`, when getting from `pin_memory_thread`,
+     #               check `pin_memory_thread` status periodically until `.get()`
+     #               returns or we see that `pin_memory_thread` died.
+     #
+     #        b. A process won't hang when putting into a queue;
+     #
+     #           We use `mp.Queue` which has a separate background thread to put
+     #           objects from an unbounded buffer array. The background thread is
+     #           daemonic and usually automatically joined when the process
+     #           *exits*.
+     #
+     #           In case the receiver has ended abruptly while
+     #           reading from the pipe, the join will hang forever. The usual
+     #           solution for this in Python is calling `q.cancel_join_thread`,
+     #           which prevents automatically joining it when finalizing
+     #           (exiting).
+     #
+     #           Nonetheless, `cancel_join_thread` must only be called when the
+     #           queue is **not** going to be read from or written into by another
+     #           process, because it may hold onto a lock or leave corrupted data
+     #           in the queue, leading other readers/writers to hang.
+     #
+     #           Hence,
+     #             + For worker processes, we only do so (for their output
+     #               queues, i.e., `worker_result_queue`) before exiting.
+     #             + For `pin_memory_thread`, its output queue `data_queue` is a
+     #               `queue.Queue` that does blocking `put` if the queue is full.
+     #               So there is no above problem, but as a result, in
+     #               `_pin_memory_loop`, we do need to wrap the `put` in a loop
+     #               that breaks not only upon success, but also when the main
+     #               process stops reading, i.e., is shutting down.
+     #             + For the loader process, we `cancel_join_thread()` for all
+     #               `_index_queues` because the whole purpose of workers and
+     #               `pin_memory_thread` is to serve the loader process. If the
+     #               loader process is already exiting, we don't really care if
+     #               the queues are corrupted.
+     #
+     #
+     # Now let's get back to 1:
+     #   how we gracefully exit the workers when the last reference to the
+     #   iterator is gone.
+     #
+     # To achieve this, we implement the following logic along with the design
+     # choices mentioned above:
+     #
+     # `workers_done_event`:
+     #   A `multiprocessing.Event` shared among the main process and all worker
+     #   processes. This is used to signal the workers that the iterator is
+     #   shutting down. After it is set, they will not send processed data to
+     #   queues anymore, and only wait for the final `None` before exiting.
+     #   `done_event` isn't strictly needed. I.e., we can just check for `None`
+     #   from the input queue, but it allows us to skip wasting resources
+     #   processing data if we are already shutting down.
+     #
+     # `pin_memory_thread_done_event`:
+     #   A `threading.Event` for a similar purpose to that of
+     #   `workers_done_event`, but for the `pin_memory_thread`. The reason
+     #   that separate events are needed is that `pin_memory_thread` reads from
+     #   the output queue of the workers. But the workers, upon seeing that
+     #   `workers_done_event` is set, only want to see the final `None`, and are
+     #   not required to flush all data in the output queue (e.g., they may call
+     #   `cancel_join_thread` on that queue if their `IterableDataset` iterator
+     #   happens to exhaust coincidentally, which is out of the control of the
+     #   main process). Thus, since we will exit `pin_memory_thread` before the
+     #   workers (see below), two separate events are used.
+     #
+     # NOTE: In short, the protocol is that the main process will set these
+     #       `done_event`s and then send the corresponding processes/threads a `None`,
+     #       and that they may exit at any time after receiving the `None`.
+     #
+     # NOTE: Using `None` as the final signal is valid, since normal data will
+     #       always be a 2-tuple with the 1st element being the index of the data
+     #       transferred (different from dataset index/key), and the 2nd being
+     #       either the dataset key or the data sample (depending on which part
+     #       of the data model the queue is at).
+     #
+     # [ worker processes ]
+     #   While loader process is alive:
+     #     Get from `index_queue`.
+     #       If get anything else,
+     #          Check `workers_done_event`.
+     #            If set, continue to next iteration
+     #                    i.e., keep getting until see the `None`, then exit.
+     #            Otherwise, process data:
+     #                If is fetching from an `IterableDataset` and the iterator
+     #                    is exhausted, send an `_IterableDatasetStopIteration`
+     #                    object to signal iteration end. The main process, upon
+     #                    receiving such an object, will send `None` to this
+     #                    worker and not use the corresponding `index_queue`
+     #                    anymore.
+     #       If timed out,
+     #          No matter whether `workers_done_event` is set (still need to see `None`)
+     #          or not, must continue to next iteration.
+     #   (outside loop)
+     #   If `workers_done_event` is set,  (this can be False with `IterableDataset`)
+     #     `data_queue.cancel_join_thread()`.  (Everything is ending here:
+     #                                          main process won't read from it;
+     #                                          other workers will also call
+     #                                          `cancel_join_thread`.)
+     #
+     # [ pin_memory_thread ]
+     #   # No need to check main thread. If this thread is alive, the main loader
+     #   # thread must be alive, because this thread is set as daemonic.
+     #   While `pin_memory_thread_done_event` is not set:
+     #     Get from `worker_result_queue`.
+     #       If timed out, continue to get in the next iteration.
+     #       Otherwise, process data.
+     #       While `pin_memory_thread_done_event` is not set:
+     #         Put processed data to `data_queue` (a `queue.Queue` with blocking put)
+     #         If timed out, continue to put in the next iteration.
+     #         Otherwise, break, i.e., continuing to the outer loop.
+     #
+     #   NOTE: we don't check the status of the main thread because
+     #           1. if the process is killed by fatal signal, `pin_memory_thread`
+     #              ends.
+     #           2. in other cases, either the cleaning-up in __del__ or the
+     #              automatic exit of daemonic thread will take care of it.
+     #              This won't busy-wait either because `.get(timeout)` does not
+     #              busy-wait.
+     #
+     # [ main process ]
+     #   In the DataLoader Iter's `__del__`
+     #     b. Exit `pin_memory_thread`
+     #          i.   Set `pin_memory_thread_done_event`.
+     #          ii.  Put `None` in `worker_result_queue`.
+     #          iii. Join the `pin_memory_thread`.
+     #          iv.  `worker_result_queue.cancel_join_thread()`.
+     #
+     #     c. Exit the workers.
+     #          i.   Set `workers_done_event`.
+     #          ii.  Put `None` in each worker's `index_queue`.
+     #          iii. Join the workers.
+     #          iv.  Call `.cancel_join_thread()` on each worker's `index_queue`.
+     #
+     #        NOTE: (c) is better placed after (b) because it may leave corrupted
+     #              data in `worker_result_queue`, which `pin_memory_thread`
+     #              reads from, in which case the `pin_memory_thread` can only
+     #              happen at timing out, which is slow. Nonetheless, same thing
+     #              happens if a worker is killed by signal at unfortunate times,
+     #              but in other cases, we are better off having a non-corrupted
981
+ # `worker_result_queue` for `pin_memory_thread`.
982
+ #
983
+ # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
984
+ # can be omitted
985
+ #
986
+ # NB: `done_event`s isn't strictly needed. E.g., we can just check for
987
+ # `None` from `index_queue`, but it allows us to skip wasting resources
988
+ # processing indices already in `index_queue` if we are already shutting
989
+ # down.
990
+
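A minimal, self-contained sketch of the event-plus-`None`-sentinel shutdown protocol described in the note above, reduced to its essentials; it is not part of the committed file and all names in it are hypothetical:

import multiprocessing as mp

def _toy_worker(index_queue, done_event):
    while True:
        item = index_queue.get()
        if item is None:            # final signal: safe to exit now
            break
        if done_event.is_set():     # shutting down: drain until the `None`
            continue
        pass                        # ... otherwise, process `item` ...

if __name__ == "__main__":
    q, done = mp.Queue(), mp.Event()
    p = mp.Process(target=_toy_worker, args=(q, done))
    p.start()
    q.put(1)
    done.set()    # step i:  signal shutdown
    q.put(None)   # step ii: wake the worker with the sentinel
    p.join()      # step iii: the worker may exit any time after the `None`
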
+     def __init__(self, loader):
+         super().__init__(loader)
+
+         self._prefetch_factor = loader.prefetch_factor
+
+         assert self._num_workers > 0
+         assert self._prefetch_factor > 0
+
+         if loader.multiprocessing_context is None:
+             multiprocessing_context = multiprocessing
+         else:
+             multiprocessing_context = loader.multiprocessing_context
+
+         self._worker_init_fn = loader.worker_init_fn
+
+         # Adds forward compatibilities so classic DataLoader can work with DataPipes:
+         #   Additional worker init function will take care of sharding in MP and Distributed
+         if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
+             self._worker_init_fn = functools.partial(
+                 _sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank)
+
+         # No certainty which module multiprocessing_context is
+         self._worker_result_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
+         self._worker_pids_set = False
+         self._shutdown = False
+         self._workers_done_event = multiprocessing_context.Event()
+
+         self._index_queues = []
+         self._workers = []
+         for i in range(self._num_workers):
+             # No certainty which module multiprocessing_context is
+             index_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
+             # Need to `cancel_join_thread` here!
+             # See sections (2) and (3b) above.
+             index_queue.cancel_join_thread()
+             w = multiprocessing_context.Process(
+                 target=_utils.worker._worker_loop,
+                 args=(self._dataset_kind, self._dataset, index_queue,
+                       self._worker_result_queue, self._workers_done_event,
+                       self._auto_collation, self._collate_fn, self._drop_last,
+                       self._base_seed, self._worker_init_fn, i, self._num_workers,
+                       self._persistent_workers, self._shared_seed))
+             w.daemon = True
+             # NB: Process.start() actually takes some time as it needs to
+             #     start a process and pass the arguments over via a pipe.
+             #     Therefore, we only add a worker to the self._workers list after
+             #     it has started, so that we do not call .join() if the program dies
+             #     before it starts, and __del__ tries to join but would get:
+             #     AssertionError: can only join a started process.
+             w.start()
+             self._index_queues.append(index_queue)
+             self._workers.append(w)
+
+         if self._pin_memory:
+             self._pin_memory_thread_done_event = threading.Event()
+
+             # Queue is not type-annotated
+             self._data_queue = queue.Queue()  # type: ignore[var-annotated]
+             if self._pin_memory_device == "xpu":
+                 current_device = torch.xpu.current_device()  # type: ignore[attr-defined]
+             elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
+                 custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
+                 current_device = custom_device_mod.current_device()
+             else:
+                 current_device = torch.cuda.current_device()  # choose cuda for default
+             pin_memory_thread = threading.Thread(
+                 target=_utils.pin_memory._pin_memory_loop,
+                 args=(self._worker_result_queue, self._data_queue,
+                       current_device,
+                       self._pin_memory_thread_done_event, self._pin_memory_device))
+             pin_memory_thread.daemon = True
+             pin_memory_thread.start()
+             # Similar to workers (see comment above), we only register
+             # pin_memory_thread once it is started.
+             self._pin_memory_thread = pin_memory_thread
+         else:
+             self._data_queue = self._worker_result_queue  # type: ignore[assignment]
+
+         # In some rare cases, persistent workers (daemonic processes)
+         # would be terminated before `__del__` of the iterator is invoked
+         # when the main process exits.
+         # That would cause a failure when pin_memory_thread tries to read
+         # corrupted data from worker_result_queue.
+         # atexit is used to shut down the thread and child processes in the
+         # right sequence before the main process exits.
+         if self._persistent_workers and self._pin_memory:
+             import atexit
+             for w in self._workers:
+                 atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)
+
+         # .pid can be None only before process is spawned (not the case, so ignore)
+         _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers))  # type: ignore[misc]
+         _utils.signal_handling._set_SIGCHLD_handler()
+         self._worker_pids_set = True
+         self._reset(loader, first_iter=True)
+
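A brief usage sketch, not part of the committed file: this iterator is what `DataLoader` constructs when `num_workers > 0`, and `pin_memory=True` additionally starts the pin-memory thread set up above (if no accelerator is available, pinning is skipped with a warning rather than failing):

import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.arange(8).float())
loader = DataLoader(ds, batch_size=2, num_workers=2, pin_memory=True)
for (batch,) in loader:  # iterating creates a _MultiProcessingDataLoaderIter
    print(batch)
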
+     def _reset(self, loader, first_iter=False):
+         super()._reset(loader, first_iter)
+         self._send_idx = 0  # idx of the next task to be sent to workers
+         self._rcvd_idx = 0  # idx of the next task to be returned in __next__
+         # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
+         # map: task idx => - (worker_id,)        if data isn't fetched (outstanding)
+         #                  \ (worker_id, data)   if data is already fetched (out-of-order)
+         self._task_info = {}
+         self._tasks_outstanding = 0  # always equal to count(v for v in task_info.values() if len(v) == 1)
+         # A list of booleans representing whether each worker still has work to
+         # do, i.e., not having exhausted its iterable dataset object. It always
+         # contains all `True`s if not using an iterable-style dataset
+         # (i.e., if kind != Iterable).
+         # Note that this only indicates that a worker still has work to do *for this epoch*.
+         # It does not mean that a worker is dead. In case of `_persistent_workers`,
+         # the worker will be reset to available in the next epoch.
+         self._workers_status = [True for i in range(self._num_workers)]
+         # Reset the worker queue cycle so it resumes next epoch at worker 0
+         self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
+         # We resume the prefetching in case it was enabled
+         if not first_iter:
+             for idx in range(self._num_workers):
+                 self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
+             resume_iteration_cnt = self._num_workers
+             while resume_iteration_cnt > 0:
+                 return_idx, return_data = self._get_data()
+                 if isinstance(return_idx, _utils.worker._ResumeIteration):
+                     assert return_data is None
+                     resume_iteration_cnt -= 1
+         # prime the prefetch loop
+         for _ in range(self._prefetch_factor * self._num_workers):
+             self._try_put_index()
+
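An illustrative sketch, not part of the committed file, of the bookkeeping that `_reset` initializes, with hypothetical values: `_task_info` maps a task index to `(worker_id,)` while the task is outstanding and to `(worker_id, data)` once a result has arrived out of order:

task_info = {
    3: (0,),             # sent to worker 0, result not received yet
    4: (1, "batch-4"),   # worker 1 already returned this, out of order
}
tasks_outstanding = sum(1 for v in task_info.values() if len(v) == 1)
assert tasks_outstanding == 1  # matches the invariant noted above
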
+     def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
+         # Tries to fetch data from `self._data_queue` once for a given timeout.
+         # This can also be used as the inner loop of fetching without timeout, with
+         # the sender status as the loop condition.
+         #
+         # This raises a `RuntimeError` if any worker died unexpectedly. This error
+         # can come from either the SIGCHLD handler in `_utils/signal_handling.py`
+         # (only for non-Windows platforms), or the manual check below on errors
+         # and timeouts.
+         #
+         # Returns a 2-tuple:
+         #   (bool: whether we successfully got data, any: data if successful else None)
+         try:
+             data = self._data_queue.get(timeout=timeout)
+             return (True, data)
+         except Exception as e:
+             # At timeout and error, we manually check whether any worker has
+             # failed. Note that this is the only mechanism for Windows to detect
+             # worker failures.
+             failed_workers = []
+             for worker_id, w in enumerate(self._workers):
+                 if self._workers_status[worker_id] and not w.is_alive():
+                     failed_workers.append(w)
+                     self._mark_worker_as_unavailable(worker_id)
+             if len(failed_workers) > 0:
+                 pids_str = ', '.join(str(w.pid) for w in failed_workers)
+                 raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e
+             if isinstance(e, queue.Empty):
+                 return (False, None)
+             import tempfile
+             import errno
+             try:
+                 # Raise an exception if we are this close to the FDs limit.
+                 # Apparently, trying to open only one file is not a sufficient
+                 # test.
+                 # See NOTE [ DataLoader on Linux and open files limit ]
+                 fds_limit_margin = 10
+                 fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
+             except OSError as e:
+                 if e.errno == errno.EMFILE:
+                     raise RuntimeError(
+                         "Too many open files. Communication with the"
+                         " workers is no longer possible. Please increase the"
+                         " limit using `ulimit -n` in the shell or change the"
+                         " sharing strategy by calling"
+                         " `torch.multiprocessing.set_sharing_strategy('file_system')`"
+                         " at the beginning of your code") from None
+             raise
+
+     # NOTE [ DataLoader on Linux and open files limit ]
+     #
+     # On Linux when DataLoader is used with multiprocessing we pass the data between
+     # the root process and the workers through SHM files. We remove those files from
+     # the filesystem as soon as they are created and keep them alive by
+     # passing around their file descriptors through AF_UNIX sockets. (See
+     # docs/source/multiprocessing.rst and `Multiprocessing Technical Notes` in
+     # the wiki (https://github.com/pytorch/pytorch/wiki).)
+     #
+     # This sometimes leads us to exceeding the open files limit. When that happens,
+     # and the offending file descriptor is coming over a socket, the `socket` Python
+     # package silently strips the file descriptor from the message, setting only the
+     # `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that
+     # it _indicates that some control data were discarded due to lack of space in
+     # the buffer for ancillary data_). This might reflect the C implementation of
+     # AF_UNIX sockets.
+     #
+     # This behaviour can be reproduced with the script and instructions at the
+     # bottom of this note.
+     #
+     # When that happens, the standard Python `multiprocessing` (and not
+     # `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`
+     #
+     # Sometimes, instead of the FD being stripped, you may get an `OSError:
+     # Too many open files`, both in the script below and in DataLoader. However,
+     # this is rare and seems to be nondeterministic.
+     #
+     #
+     #   #!/usr/bin/env python3
+     #   import sys
+     #   import socket
+     #   import os
+     #   import array
+     #   import shutil
+     #
+     #
+     #   if len(sys.argv) != 4:
+     #       print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)")
+     #       sys.exit(1)
+     #
+     #   if __name__ == '__main__':
+     #       dirname = sys.argv[1]
+     #       sock_path = dirname + "/sock"
+     #       iterations = int(sys.argv[2])
+     #       def dummy_path(i):
+     #           return dirname + "/" + str(i) + ".dummy"
+     #
+     #
+     #       if sys.argv[3] == 'send':
+     #           while not os.path.exists(sock_path):
+     #               pass
+     #           client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+     #           client.connect(sock_path)
+     #           for i in range(iterations):
+     #               fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)
+     #               ancdata = array.array('i', [fd])
+     #               msg = bytes([i % 256])
+     #               print("Sending fd ", fd, " (iteration #", i, ")")
+     #               client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])
+     #
+     #
+     #       else:
+     #           assert sys.argv[3] == 'recv'
+     #
+     #           if os.path.exists(dirname):
+     #               raise Exception("Directory exists")
+     #
+     #           os.mkdir(dirname)
+     #
+     #           print("Opening socket...")
+     #           server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+     #           server.bind(sock_path)
+     #
+     #           print("Listening...")
+     #           for i in range(iterations):
+     #               a = array.array('i')
+     #               msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))
+     #               assert(len(ancdata) == 1)
+     #               cmsg_level, cmsg_type, cmsg_data = ancdata[0]
+     #               a.frombytes(cmsg_data)
+     #               print("Received fd ", a[0], " (iteration #", i, ")")
+     #
+     #       shutil.rmtree(dirname)
+     #
+     # Steps to reproduce:
+     #
+     # 1. Run two shells and set a lower file descriptor limit in the receiving one:
+     #    (shell1) ulimit -n 1020
+     #    (shell2) ulimit -n 1022
+     #
+     # 2. Run the script above with the `recv` option in the first shell:
+     #    (shell1) ./test_socket.py sock_tmp 1017 recv
+     #
+     # 3. Run the script with the `send` option in the second shell:
+     #    (shell2) ./test_socket.py sock_tmp 1017 send
+
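A minimal sketch, not part of the committed file, of the workaround the error message above suggests; `set_sharing_strategy` is the documented `torch.multiprocessing` API and should run before any DataLoader is created:

import torch.multiprocessing

# Share tensors via the filesystem instead of cached file descriptors,
# avoiding the per-process open-files limit described in the note above.
torch.multiprocessing.set_sharing_strategy('file_system')
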
+     def _get_data(self):
+         # Fetches data from `self._data_queue`.
+         #
+         # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
+         # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`
+         # in a loop. This is the only mechanism to detect worker failures for
+         # Windows. For other platforms, a SIGCHLD handler is also used for
+         # worker failure detection.
+         #
+         # If `pin_memory=True`, we also need to check whether `pin_memory_thread`
+         # has died at each timeout.
+         if self._timeout > 0:
+             success, data = self._try_get_data(self._timeout)
+             if success:
+                 return data
+             else:
+                 raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds')
+         elif self._pin_memory:
+             while self._pin_memory_thread.is_alive():
+                 success, data = self._try_get_data()
+                 if success:
+                     return data
+             else:
+                 # while condition is false, i.e., pin_memory_thread died.
+                 raise RuntimeError('Pin memory thread exited unexpectedly')
+             # In this case, `self._data_queue` is a `queue.Queue`. But we don't
+             # need to call `.task_done()` because we don't use `.join()`.
+         else:
+             while True:
+                 success, data = self._try_get_data()
+                 if success:
+                     return data
+
+     def _next_data(self):
+         while True:
+             # If the worker responsible for `self._rcvd_idx` has already ended
+             # and was unable to fulfill this task (due to exhausting an `IterableDataset`),
+             # we try to advance `self._rcvd_idx` to find the next valid index.
+             #
+             # This part needs to run in the loop because both the `self._get_data()`
+             # call and the `_IterableDatasetStopIteration` check below can mark
+             # extra worker(s) as dead.
+             while self._rcvd_idx < self._send_idx:
+                 info = self._task_info[self._rcvd_idx]
+                 worker_id = info[0]
+                 if len(info) == 2 or self._workers_status[worker_id]:  # has data or is still active
+                     break
+                 del self._task_info[self._rcvd_idx]
+                 self._rcvd_idx += 1
+             else:
+                 # no valid `self._rcvd_idx` is found (i.e., didn't break)
+                 if not self._persistent_workers:
+                     self._shutdown_workers()
+                 raise StopIteration
+
+             # Now `self._rcvd_idx` is the batch index we want to fetch
+
+             # Check if the next sample has already been generated
+             if len(self._task_info[self._rcvd_idx]) == 2:
+                 data = self._task_info.pop(self._rcvd_idx)[1]
+                 return self._process_data(data)
+
+             assert not self._shutdown and self._tasks_outstanding > 0
+             idx, data = self._get_data()
+             self._tasks_outstanding -= 1
+             if self._dataset_kind == _DatasetKind.Iterable:
+                 # Check for _IterableDatasetStopIteration
+                 if isinstance(data, _utils.worker._IterableDatasetStopIteration):
+                     if self._persistent_workers:
+                         self._workers_status[data.worker_id] = False
+                     else:
+                         self._mark_worker_as_unavailable(data.worker_id)
+                     self._try_put_index()
+                     continue
+
+             if idx != self._rcvd_idx:
+                 # store out-of-order samples
+                 self._task_info[idx] += (data,)
+             else:
+                 del self._task_info[idx]
+                 return self._process_data(data)
+
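An illustrative sketch, not part of the committed file, of the in-order delivery performed by `_next_data` above, reduced to plain Python: results may arrive out of order and are buffered until the receive index catches up:

def deliver_in_order(arrivals):
    buffered, rcvd_idx = {}, 0
    for idx, data in arrivals:        # e.g. results pulled off the data queue
        buffered[idx] = data
        while rcvd_idx in buffered:   # yield every contiguous ready result
            yield buffered.pop(rcvd_idx)
            rcvd_idx += 1

assert list(deliver_in_order([(1, 'b'), (0, 'a'), (2, 'c')])) == ['a', 'b', 'c']
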
+     def _try_put_index(self):
+         assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
+
+         try:
+             index = self._next_index()
+         except StopIteration:
+             return
+         for _ in range(self._num_workers):  # find the next active worker, if any
+             worker_queue_idx = next(self._worker_queue_idx_cycle)
+             if self._workers_status[worker_queue_idx]:
+                 break
+         else:
+             # not found (i.e., didn't break)
+             return
+
+         self._index_queues[worker_queue_idx].put((self._send_idx, index))  # type: ignore[possibly-undefined]
+         self._task_info[self._send_idx] = (worker_queue_idx,)
+         self._tasks_outstanding += 1
+         self._send_idx += 1
+
+     def _process_data(self, data):
+         self._rcvd_idx += 1
+         self._try_put_index()
+         if isinstance(data, ExceptionWrapper):
+             data.reraise()
+         return data
+
+     def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
+         # Mark a worker as having finished its work, e.g., due to
+         # exhausting an `IterableDataset`. This should be used only when this
+         # `_MultiProcessingDataLoaderIter` is going to continue running.
+
+         assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)
+
+         # Signal termination to that specific worker.
+         q = self._index_queues[worker_id]
+         # Indicate that no more data will be put on this queue by the current
+         # process.
+         q.put(None)
+
+         # Note that we don't actually join the worker here, nor do we remove the
+         # worker's pid from the C side struct because (1) joining may be slow, and
+         # (2) since we don't join, the worker may still raise an error, and we
+         # prefer capturing those, rather than ignoring them, even though they
+         # are raised after the worker has finished its job.
+         # Joining is deferred to `_shutdown_workers`, which is called when
+         # all workers finish their jobs (e.g., `IterableDataset` replicas) or
+         # when this iterator is garbage collected.
+
+         self._workers_status[worker_id] = False
+
+         assert self._workers_done_event.is_set() == shutdown
+
+     def _shutdown_workers(self):
+         # Called when shutting down this `_MultiProcessingDataLoaderIter`.
+         # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
+         # the logic of this function.
+         if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None:
+             # See (2) of the note. If Python is shutting down, do nothing.
+             return
+         # Normal exit when the last reference is gone / the iterator is depleted.
+         # See (1) and the second half of the note.
+         if not self._shutdown:
+             self._shutdown = True
+             try:
+                 # Exit `pin_memory_thread` first because exiting workers may leave
+                 # corrupted data in `worker_result_queue`, which `pin_memory_thread`
+                 # reads from.
+                 if hasattr(self, '_pin_memory_thread'):
+                     # Use hasattr in case an error happens before we set the attribute.
+                     self._pin_memory_thread_done_event.set()
+                     # Send something to pin_memory_thread in case it is waiting
+                     # so that it can wake up and check `pin_memory_thread_done_event`
+                     self._worker_result_queue.put((None, None))
+                     self._pin_memory_thread.join()
+                     self._worker_result_queue.cancel_join_thread()
+                     self._worker_result_queue.close()
+
+                 # Exit workers now.
+                 self._workers_done_event.set()
+                 for worker_id in range(len(self._workers)):
+                     # Get the number of workers from `len(self._workers)` instead of
+                     # `self._num_workers` in case we error before starting all
+                     # workers.
+                     # If we are using workers_status with persistent_workers
+                     # we have to shut it down because the worker is paused.
+                     if self._persistent_workers or self._workers_status[worker_id]:
+                         self._mark_worker_as_unavailable(worker_id, shutdown=True)
+                 for w in self._workers:
+                     # We should be able to join here, but in case anything went
+                     # wrong, we set a timeout and if the workers fail to join,
+                     # they are killed in the `finally` block.
+                     w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
+                 for q in self._index_queues:
+                     q.cancel_join_thread()
+                     q.close()
+             finally:
+                 # Even though all this function does is putting into queues that
+                 # we have called `cancel_join_thread` on, weird things can
+                 # happen when a worker is killed by a signal, e.g., hanging in
+                 # `Event.set()`. So we need to guard this with the SIGCHLD handler,
+                 # and remove pids from the C side data structure only at the
+                 # end.
+                 #
+                 # FIXME: Unfortunately, for Windows, we are missing a worker
+                 #        error detection mechanism here in this function, as it
+                 #        doesn't provide a SIGCHLD handler.
+                 if self._worker_pids_set:
+                     _utils.signal_handling._remove_worker_pids(id(self))
+                     self._worker_pids_set = False
+                 for w in self._workers:
+                     if w.is_alive():
+                         # Existing mechanisms try to make the workers exit
+                         # peacefully, but in case that we unfortunately reach
+                         # here, which we shouldn't (e.g., pytorch/pytorch#39570),
+                         # we kill the worker.
+                         w.terminate()
+
+     # staticmethod is used to remove the reference to `_MultiProcessingDataLoaderIter`
+     @staticmethod
+     def _clean_up_worker(w):
+         try:
+             w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
+         finally:
+             if w.is_alive():
+                 w.terminate()
+
+     def __del__(self):
+         self._shutdown_workers()
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from . import iter
+ from . import map
+ from . import dataframe
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py ADDED
@@ -0,0 +1,184 @@
+ import inspect
+ from functools import wraps
+ from typing import Any, Callable, Optional, Type, Union, get_type_hints
+ from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
+ from torch.utils.data.datapipes._typing import _DataPipeMeta
+
+
+ ######################################################
+ # Functional API
+ ######################################################
+ class functional_datapipe:
+     name: str
+
+     def __init__(self, name: str, enable_df_api_tracing=False) -> None:
+         """
+         Define a functional datapipe.
+
+         Args:
+             enable_df_api_tracing - if set, any returned DataPipe would accept
+                 the DataFrames API in tracing mode.
+         """
+         self.name = name
+         self.enable_df_api_tracing = enable_df_api_tracing
+
+     def __call__(self, cls):
+         if issubclass(cls, IterDataPipe):
+             if isinstance(cls, Type):  # type: ignore[arg-type]
+                 if not isinstance(cls, _DataPipeMeta):
+                     raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
+             # with non_deterministic decorator
+             else:
+                 if not isinstance(cls, non_deterministic) and \
+                         not (hasattr(cls, '__self__') and
+                              isinstance(cls.__self__, non_deterministic)):
+                     raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
+             IterDataPipe.register_datapipe_as_function(self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing)
+         elif issubclass(cls, MapDataPipe):
+             MapDataPipe.register_datapipe_as_function(self.name, cls)
+
+         return cls
+
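A usage sketch, not part of the committed file: registering a custom IterDataPipe under a functional name ("double" and `DoublerIterDataPipe` are hypothetical names) so it can be chained off any existing DataPipe:

from torch.utils.data.datapipes.iter import IterableWrapper

@functional_datapipe("double")
class DoublerIterDataPipe(IterDataPipe):
    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe

    def __iter__(self):
        for x in self.source_datapipe:
            yield 2 * x

dp = IterableWrapper([1, 2, 3]).double()  # the functional form is now available
assert list(dp) == [2, 4, 6]
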
+ ######################################################
+ # Determinism
+ ######################################################
+ _determinism: bool = False
+
+
+ class guaranteed_datapipes_determinism:
+     prev: bool
+
+     def __init__(self) -> None:
+         global _determinism
+         self.prev = _determinism
+         _determinism = True
+
+     def __enter__(self) -> None:
+         pass
+
+     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+         global _determinism
+         _determinism = self.prev
+
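A brief usage sketch, not part of the committed file: the context manager above flips the module-level `_determinism` flag for the duration of the block, so constructing a `non_deterministic`-decorated DataPipe inside it raises a `TypeError`:

with guaranteed_datapipes_determinism():
    pass  # building a non-deterministic DataPipe here would raise TypeError
# the previous flag value is restored on exit
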
+
+ class non_deterministic:
+     cls: Optional[Type[IterDataPipe]] = None
+     # TODO: Lambda for picking
+     deterministic_fn: Callable[[], bool]
+
+     def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None:
+         # 1. Decorator doesn't have any argument
+         if isinstance(arg, Type):  # type: ignore[arg-type]
+             if not issubclass(arg, IterDataPipe):  # type: ignore[arg-type]
+                 raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`"
+                                 f", but {arg.__name__} is found")
+             self.cls = arg  # type: ignore[assignment]
+         # 2. Decorator has an argument of a function
+         #    This class should behave differently given different inputs. Use this
+         #    function to verify the determinism for each instance.
+         #    When the function returns True, the instance is non-deterministic. Otherwise,
+         #    the instance is a deterministic DataPipe.
+         elif isinstance(arg, Callable):  # type:ignore[arg-type]
+             self.deterministic_fn = arg  # type: ignore[assignment, misc]
+         else:
+             raise TypeError(f"{arg} can not be decorated by non_deterministic")
+
+     def __call__(self, *args, **kwargs):
+         global _determinism
+         # Decorate IterDataPipe
+         if self.cls is not None:
+             if _determinism:
+                 raise TypeError("{} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. "
+                                 "You can turn off determinism for this DataPipe if that is acceptable "
+                                 "for your application".format(self.cls.__name__))
+             return self.cls(*args, **kwargs)  # type: ignore[call-arg]
+
+         # Decorate with a functional argument
+         if not (isinstance(args[0], Type) and  # type: ignore[arg-type]
+                 issubclass(args[0], IterDataPipe)):
+             raise TypeError(f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found")
+         self.cls = args[0]
+         return self.deterministic_wrapper_fn
+
+     def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe:
+         res = self.deterministic_fn(*args, **kwargs)  # type: ignore[call-arg, misc]
+         if not isinstance(res, bool):
+             raise TypeError("deterministic_fn of `non_deterministic` decorator is required "
+                             f"to return a boolean value, but {type(res)} is found")
+         global _determinism
+         if _determinism and res:
+             raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set "  # type: ignore[union-attr]
+                             "'guaranteed_datapipes_determinism'. You can turn off determinism "
+                             "for this DataPipe if that is acceptable for your application"
+                             )
+         return self.cls(*args, **kwargs)  # type: ignore[call-arg, misc]
+
+
+ ######################################################
+ # Type validation
+ ######################################################
+ # Validate each argument of a DataPipe with a hint as a subtype of the hint.
+ def argument_validation(f):
+     signature = inspect.signature(f)
+     hints = get_type_hints(f)
+
+     @wraps(f)
+     def wrapper(*args, **kwargs):
+         bound = signature.bind(*args, **kwargs)
+         for argument_name, value in bound.arguments.items():
+             if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta):
+                 hint = hints[argument_name]
+                 if not isinstance(value, IterDataPipe):
+                     raise TypeError(f"Expected argument '{argument_name}' as an IterDataPipe, but found {type(value)}")
+                 if not value.type.issubtype(hint.type):
+                     raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of "
+                                     f"hint {hint.type}, but found {value.type}"
+                                     )
+
+         return f(*args, **kwargs)
+
+     return wrapper
+
+
+ # Default value is True
+ _runtime_validation_enabled: bool = True
+
+
+ class runtime_validation_disabled:
+     prev: bool
+
+     def __init__(self) -> None:
+         global _runtime_validation_enabled
+         self.prev = _runtime_validation_enabled
+         _runtime_validation_enabled = False
+
+     def __enter__(self) -> None:
+         pass
+
+     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+         global _runtime_validation_enabled
+         _runtime_validation_enabled = self.prev
+
+
+ # Runtime checking
+ # Validate that output data is a subtype of the return hint
+ def runtime_validation(f):
+     # TODO:
+     # Can be extended to validate '__getitem__' and nonblocking
+     if f.__name__ != '__iter__':
+         raise TypeError(f"Can not decorate function {f.__name__} with 'runtime_validation'")
+
+     @wraps(f)
+     def wrapper(self):
+         global _runtime_validation_enabled
+         if not _runtime_validation_enabled:
+             yield from f(self)
+         else:
+             it = f(self)
+             for d in it:
+                 if not self.type.issubtype_of_instance(d):
+                     raise RuntimeError(f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})")
+                 yield d
+
+     return wrapper
@@ -0,0 +1,248 @@
+ import inspect
+ import functools
+ from enum import Enum
+
+ import torch.autograd
+
+
+ class _SnapshotState(Enum):
+     r"""
+     These are the snapshotting-related states that IterDataPipes can be in.
+
+     `NotStarted` - allows you to restore a snapshot and create an iterator with reset
+     `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
+     `Iterating` - can restore, will reset if you create a new iterator
+     """
+
+     NotStarted = 0
+     Restored = 1
+     Iterating = 2
+
+
+ def _simplify_obj_name(obj) -> str:
+     """Simplify the display strings of objects for the purpose of rendering within DataPipe error messages."""
+     if inspect.isfunction(obj):
+         return obj.__name__
+     else:
+         return repr(obj)
+
+
+ def _strip_datapipe_from_name(name: str) -> str:
+     return name.replace("IterDataPipe", "").replace("MapDataPipe", "")
+
+
+ def _generate_input_args_string(obj):
+     """Generate a string for the input arguments of an object."""
+     signature = inspect.signature(obj.__class__)
+     input_param_names = set()
+     for param_name in signature.parameters.keys():
+         input_param_names.add(param_name)
+     result = []
+     for name, value in inspect.getmembers(obj):
+         if name in input_param_names:
+             result.append((name, _simplify_obj_name(value)))
+     return ', '.join([f'{name}={value}' for name, value in result])
+
+
+ def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False):
+     output_string = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
+     if simplify_dp_name:
+         output_string = _strip_datapipe_from_name(output_string)
+     return output_string
+
+
+ def _gen_invalid_iterdatapipe_msg(datapipe):
+     return ("This iterator has been invalidated because another iterator has been created "
+             f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
+             "This may be caused by multiple references to the same IterDataPipe. We recommend "
+             "using `.fork()` if that is necessary.")
+
+
+ _feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
+                  "to comment on this issue: https://github.com/pytorch/data/issues/45.")
+
+
65
+ def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
66
+ r"""
67
+ Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception.
68
+
69
+ In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.
70
+ """
71
+ if next_method_exists:
72
+ # This is the case where `IterDataPipe` has both `__iter__` and `__next__`.
73
+ # The `_valid_iterator_id` should either be never set (`None`), or set by at most one
74
+ # iterator (`0`). Otherwise, it means there are multiple iterators.
75
+ if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
76
+ extra_msg = "\nNote that this exception is raised inside your IterDataPipe's a `__next__` method"
77
+ raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)
78
+ elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
79
+ if hasattr(datapipe, "_check_valid_iterator_id"):
80
+ if not datapipe._check_valid_iterator_id(iterator_id):
81
+ raise RuntimeError("This iterator has been invalidated, because a new iterator has been created "
82
+ f"from one of the ChildDataPipes of "
83
+ f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." + _feedback_msg)
84
+ else:
85
+ raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.")
86
+ elif datapipe._valid_iterator_id != iterator_id:
87
+ raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)
88
+
89
+
90
+ def _set_datapipe_valid_iterator_id(datapipe):
91
+ """Given a DataPipe, updates its valid iterator ID and reset the DataPipe."""
92
+ if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
93
+ if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
94
+ datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate
95
+ else:
96
+ raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.")
97
+ else:
98
+ if datapipe._valid_iterator_id is None:
99
+ datapipe._valid_iterator_id = 0
100
+ else:
101
+ datapipe._valid_iterator_id += 1
102
+ datapipe.reset()
103
+ return datapipe._valid_iterator_id
104
+
105
+
106
+ def hook_iterator(namespace):
107
+ r"""
108
+ Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`.
109
+
110
+ This is done for the purpose of profiling and checking if an iterator is still valid.
111
+ """
112
+
113
+ def profiler_record_fn_context(datapipe):
114
+ if not hasattr(datapipe, "_profile_name"):
115
+ datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True)
116
+ return torch.autograd.profiler.record_function(datapipe._profile_name)
117
+
118
+ class IteratorDecorator:
119
+ r"""
120
+ Wrap the iterator and modifying its `__next__` method.
121
+
122
+ This decorator is applied to DataPipes of which `__iter__` method is NOT a generator function.
123
+ Those `__iter__` method commonly returns `self` but not necessarily.
124
+ """
125
+
126
+ def __init__(self, iterator, datapipe, iterator_id, has_next_method):
127
+ self.iterator = iterator
128
+ self.datapipe = datapipe
129
+ self.iterator_id = iterator_id
130
+ self._profiler_enabled = torch.autograd._profiler_enabled()
131
+ # Check if `__iter__` returns `self` and `DataPipe` has `__next__`
132
+ self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method
133
+
134
+ def __iter__(self):
135
+ return self
136
+
137
+ def _get_next(self):
138
+ """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded."""
139
+ _check_iterator_valid(self.datapipe, self.iterator_id)
140
+ result = next(self.iterator)
141
+ if not self.self_and_has_next_method:
142
+ self.datapipe._number_of_samples_yielded += 1
143
+ return result
144
+
145
+ def __next__(self):
146
+ # TODO: Add try-except to in-place reduce traceback from the Exception
147
+ # See: https://github.com/pytorch/data/issues/284
148
+ if self._profiler_enabled:
149
+ with profiler_record_fn_context(self.datapipe):
150
+ return self._get_next()
151
+ else: # Decided against using `contextlib.nullcontext` for performance reasons
152
+ return self._get_next()
153
+
154
+ def __getattr__(self, name):
155
+ return getattr(self.iterator, name)
156
+
157
+ func = namespace['__iter__']
158
+
159
+ # ``__iter__`` of IterDataPipe is a generator function
160
+ if inspect.isgeneratorfunction(func):
161
+ @functools.wraps(func)
162
+ def wrap_generator(*args, **kwargs):
163
+ gen = func(*args, **kwargs)
164
+ datapipe = args[0]
165
+ if datapipe._fast_forward_iterator:
166
+ it = datapipe._fast_forward_iterator
167
+ datapipe._fast_forward_iterator = None
168
+ datapipe._snapshot_state = _SnapshotState.Iterating
169
+ while True:
170
+ try:
171
+ yield next(it)
172
+ except StopIteration:
173
+ return
174
+ iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
175
+ _profiler_enabled = torch.autograd._profiler_enabled()
176
+ try:
177
+ if _profiler_enabled:
178
+ with profiler_record_fn_context(datapipe):
179
+ response = gen.send(None)
180
+ else:
181
+ response = gen.send(None)
182
+
183
+ while True:
184
+ datapipe._number_of_samples_yielded += 1
185
+ request = yield response
186
+ # Pass through here every time `__next__` is called
187
+ if _profiler_enabled:
188
+ with profiler_record_fn_context(datapipe):
189
+ _check_iterator_valid(datapipe, iterator_id)
190
+ response = gen.send(request)
191
+ else: # Decided against using `contextlib.nullcontext` for performance reasons
192
+ _check_iterator_valid(datapipe, iterator_id)
193
+ response = gen.send(request)
194
+ except StopIteration as e:
195
+ return
196
+ except Exception as e:
197
+ # TODO: Simplify the traceback message to skip over `response = gen.send(None)`
198
+ # Part of https://github.com/pytorch/data/issues/284
199
+ datapipe = args[0]
200
+ msg = "thrown by __iter__ of"
201
+ single_iterator_msg = "single iterator per IterDataPipe constraint"
202
+ if hasattr(e.args, '__len__'):
203
+ full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
204
+ if len(e.args) == 0 or not isinstance(e.args[0], str): # If an exception message doesn't exist
205
+ e.args = (f'\nThis exception is {full_msg}',)
206
+ elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
207
+ e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:]
208
+ raise
209
+
210
+ namespace['__iter__'] = wrap_generator
211
+ else: # ``__iter__`` of IterDataPipe is NOT a generator function
212
+ # IterDataPipe is an iterator with both ``__iter__`` and ``__next__``
213
+ # And ``__iter__`` may or may not return `self`
214
+ if '__next__' in namespace: # If `__next__` exists, put a wrapper around it
215
+ next_func = namespace['__next__']
216
+
217
+ @functools.wraps(next_func)
218
+ def wrap_next(*args, **kwargs):
219
+ datapipe = args[0]
220
+ if torch.autograd._profiler_enabled():
221
+ with profiler_record_fn_context(datapipe):
222
+ result = next_func(*args, **kwargs)
223
+ else:
224
+ result = next_func(*args, **kwargs)
225
+ datapipe._number_of_samples_yielded += 1
226
+ return result
227
+
228
+ namespace['__next__'] = wrap_next
229
+
230
+ # Note that if the `__next__` and `__iter__` do something completely unrelated. It may cause issue but
231
+ # the user will be violating the iterator protocol. Potential issue:
232
+ # 1. Valid iterator ID may not update or checked properly
233
+ # 2. The number of samples yielded will be miscounted
234
+
235
+ # Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators
236
+ @functools.wraps(func)
237
+ def wrap_iter(*args, **kwargs):
238
+ iter_ret = func(*args, **kwargs)
239
+ datapipe = args[0]
240
+ datapipe._snapshot_state = _SnapshotState.Iterating
241
+ if datapipe._fast_forward_iterator:
242
+ iter_ret = datapipe._fast_forward_iterator
243
+ datapipe._fast_forward_iterator = None
244
+ return iter_ret
245
+ iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
246
+ return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace)
247
+
248
+ namespace['__iter__'] = wrap_iter
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py ADDED
@@ -0,0 +1,430 @@
+ # Taking reference from official Python typing
+ # https://github.com/python/cpython/blob/master/Lib/typing.py
+
+ import collections
+ import functools
+ import numbers
+ import sys
+
+ from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState
+ from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union,
+                     get_type_hints)
+ from typing import _eval_type, _tp_cache, _type_check, _type_repr  # type: ignore[attr-defined]
+ from typing import ForwardRef
+
+ # TODO: Use TypeAlias when Python 3.6 is deprecated
+ # Please check [Note: TypeMeta and TypeAlias]
+ # In case of metaclass conflict due to ABCMeta or _ProtocolMeta
+ # For Python 3.9, only Protocol in typing uses metaclass
+ from abc import ABCMeta
+ from typing import _GenericAlias  # type: ignore[attr-defined, no-redef]
+
+ class GenericMeta(ABCMeta):  # type: ignore[no-redef]
+     pass
+
+
+ class Integer(numbers.Integral):
+     pass
+
+
+ class Boolean(numbers.Integral):
+     pass
+
+
+ # Python 'type' object is not subscriptable
+ # Tuple[int, List, dict] -> valid
+ # tuple[int, list, dict] -> invalid
+ # Map Python 'type' to abstract base class
+ TYPE2ABC = {
+     bool: Boolean,
+     int: Integer,
+     float: numbers.Real,
+     complex: numbers.Complex,
+     dict: Dict,
+     list: List,
+     set: Set,
+     tuple: Tuple,
+     None: type(None),
+ }
+
+
+ def issubtype(left, right, recursive=True):
+     r"""
+     Check if the left-side type is a subtype of the right-side type.
+
+     If either type is a composite type like `Union` or a `TypeVar` with
+     bounds, it is expanded into a list of types, and we check that every
+     left-side type is a subtype of at least one right-side type.
+     """
+     left = TYPE2ABC.get(left, left)
+     right = TYPE2ABC.get(right, right)
+
+     if right is Any or left == right:
+         return True
+
+     if isinstance(right, _GenericAlias):
+         if getattr(right, '__origin__', None) is Generic:
+             return True
+
+     if right == type(None):
+         return False
+
+     # Right-side type
+     constraints = _decompose_type(right)
+
+     if len(constraints) == 0 or Any in constraints:
+         return True
+
+     if left is Any:
+         return False
+
+     # Left-side type
+     variants = _decompose_type(left)
+
+     # all() will return True for empty variants
+     if len(variants) == 0:
+         return False
+
+     return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants)
+
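A short sketch, not part of the committed file, of the expected behavior of `issubtype` on a few simple cases (these use the typing names already imported at the top of this module):

assert issubtype(int, Any)
assert issubtype(int, Union[int, str])
assert issubtype(List[int], List)               # untyped generics accept any args
assert issubtype(Tuple[int, str], Tuple[int, str])
assert not issubtype(str, Union[int, float])
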
+
+ def _decompose_type(t, to_list=True):
+     if isinstance(t, TypeVar):
+         if t.__bound__ is not None:
+             ts = [t.__bound__]
+         else:
+             # For T_co, __constraints__ is ()
+             ts = list(t.__constraints__)
+     elif hasattr(t, '__origin__') and t.__origin__ == Union:
+         ts = t.__args__
+     else:
+         if not to_list:
+             return None
+         ts = [t]
+     # Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
+     ts = [TYPE2ABC.get(_t, _t) for _t in ts]  # type: ignore[misc]
+     return ts
+
+
+ def _issubtype_with_constraints(variant, constraints, recursive=True):
+     r"""
+     Check if the variant is a subtype of at least one of the constraints.
+
+     Composite types like `Union` and `TypeVar` with bounds are
+     expanded for testing.
+     """
+     if variant in constraints:
+         return True
+
+     # [Note: Subtype for Union and TypeVar]
+     # Python typing is able to flatten Union[Union[...]] or Union[TypeVar].
+     # But it couldn't flatten the following scenarios:
+     #   - Union[int, TypeVar[Union[...]]]
+     #   - TypeVar[TypeVar[...]]
+     # So, the variant and each constraint may be a TypeVar or a Union.
+     # In these cases, all inner types of the variant are required to be
+     # extracted and verified as a subtype of some constraint. And all
+     # inner types of any constraint that is a TypeVar or a Union are
+     # also required to be extracted and checked to see if the variant
+     # belongs to any of them.
+
+     # Variant
+     vs = _decompose_type(variant, to_list=False)
+
+     # Variant is TypeVar or Union
+     if vs is not None:
+         return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs)
+
+     # Variant is not TypeVar or Union
+     if hasattr(variant, '__origin__') and variant.__origin__ is not None:
+         v_origin = variant.__origin__
+         # In the Python 3.9 typing library untyped generics do not have args
+         v_args = getattr(variant, "__args__", None)
+     else:
+         v_origin = variant
+         v_args = None
+
+     # Constraints
+     for constraint in constraints:
+         cs = _decompose_type(constraint, to_list=False)
+
+         # Constraint is TypeVar or Union
+         if cs is not None:
+             if _issubtype_with_constraints(variant, cs, recursive):
+                 return True
+         # Constraint is not TypeVar or Union
+         else:
+             # __origin__ can be None for plain list, tuple, ... in Python 3.6
+             if hasattr(constraint, '__origin__') and constraint.__origin__ is not None:
+                 c_origin = constraint.__origin__
+                 if v_origin == c_origin:
+                     if not recursive:
+                         return True
+                     # In the Python 3.9 typing library untyped generics do not have args
+                     c_args = getattr(constraint, "__args__", None)
+                     if c_args is None or len(c_args) == 0:
+                         return True
+                     if v_args is not None and len(v_args) == len(c_args) and \
+                             all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)):
+                         return True
+             # Tuple[int] -> Tuple
+             else:
+                 if v_origin == constraint:
+                     return True
+
+     return False
+
+
+ def issubinstance(data, data_type):
+     if not issubtype(type(data), data_type, recursive=False):
+         return False
+
+     # In the Python 3.9 typing library the __args__ attribute is not defined for untyped generics
+     dt_args = getattr(data_type, "__args__", None)
+     if isinstance(data, tuple):
+         if dt_args is None or len(dt_args) == 0:
+             return True
+         if len(dt_args) != len(data):
+             return False
+         return all(issubinstance(d, t) for d, t in zip(data, dt_args))
+     elif isinstance(data, (list, set)):
+         if dt_args is None or len(dt_args) == 0:
+             return True
+         t = dt_args[0]
+         return all(issubinstance(d, t) for d in data)
+     elif isinstance(data, dict):
+         if dt_args is None or len(dt_args) == 0:
+             return True
+         kt, vt = dt_args
+         return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items())
+
+     return True
+
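A short sketch, not part of the committed file, of the expected behavior of `issubinstance`, which checks a value against a type hint recursively (again using the module's own typing imports):

assert issubinstance([1, 2, 3], List[int])
assert not issubinstance([1, 'a'], List[int])
assert issubinstance({'k': 1.0}, Dict[str, float])
assert issubinstance((1, 'a'), Tuple[int, str])
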
204
+ # [Note: TypeMeta and TypeAlias]
205
+ # In order to keep compatibility for Python 3.6, use Meta for the typing.
206
+ # TODO: When PyTorch drops the support for Python 3.6, it can be converted
207
+ # into the Alias system and using `__class_getitem__` for DataPipe. The
208
+ # typing system will gain benefit of performance and resolving metaclass
209
+ # conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/
210
+
211
+
212
+ class _DataPipeType:
213
+ r"""Save type annotation in `param`."""
214
+
215
+ def __init__(self, param):
216
+ self.param = param
217
+
218
+ def __repr__(self):
219
+ return _type_repr(self.param)
220
+
221
+ def __eq__(self, other):
222
+ if isinstance(other, _DataPipeType):
223
+ return self.param == other.param
224
+ return NotImplemented
225
+
226
+ def __hash__(self):
227
+ return hash(self.param)
228
+
229
+ def issubtype(self, other):
230
+ if isinstance(other.param, _GenericAlias):
231
+ if getattr(other.param, '__origin__', None) is Generic:
232
+ return True
233
+ if isinstance(other, _DataPipeType):
234
+ return issubtype(self.param, other.param)
235
+ if isinstance(other, type):
236
+ return issubtype(self.param, other)
237
+ raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}")
238
+
239
+ def issubtype_of_instance(self, other):
240
+ return issubinstance(other, self.param)
241
+
242
+
243
+ # Default type for DataPipe without annotation
244
+ T_co = TypeVar('T_co', covariant=True)
245
+ _DEFAULT_TYPE = _DataPipeType(Generic[T_co])
246
+
247
+
248
+ class _DataPipeMeta(GenericMeta):
249
+ r"""
250
+ Metaclass for `DataPipe`.
251
+
252
+ Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`.
253
+
254
+ Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
255
+ """
256
+
257
+ type: _DataPipeType
258
+
259
+ def __new__(cls, name, bases, namespace, **kwargs):
260
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
261
+
262
+ # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
263
+ cls.__origin__ = None
264
+ if 'type' in namespace:
265
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
266
+
267
+ namespace['__type_class__'] = False
268
+ # For plain derived class without annotation
269
+ for base in bases:
270
+ if isinstance(base, _DataPipeMeta):
271
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
272
+
273
+ namespace.update({'type': _DEFAULT_TYPE,
274
+ '__init_subclass__': _dp_init_subclass})
275
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
276
+
277
+ def __init__(self, name, bases, namespace, **kwargs):
278
+ super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload]
279
+
280
+ # TODO: Fix isinstance bug
281
+ @_tp_cache
282
+ def _getitem_(self, params):
283
+ if params is None:
284
+ raise TypeError(f'{self.__name__}[t]: t can not be None')
285
+ if isinstance(params, str):
286
+ params = ForwardRef(params)
287
+ if not isinstance(params, tuple):
288
+ params = (params, )
289
+
290
+ msg = f"{self.__name__}[t]: t must be a type"
291
+ params = tuple(_type_check(p, msg) for p in params)
292
+
293
+ if isinstance(self.type.param, _GenericAlias):
294
+ orig = getattr(self.type.param, '__origin__', None)
295
+ if isinstance(orig, type) and orig is not Generic:
296
+ p = self.type.param[params] # type: ignore[index]
297
+ t = _DataPipeType(p)
298
+ l = len(str(self.type)) + 2
299
+ name = self.__name__[:-l]
300
+ name = name + '[' + str(t) + ']'
301
+ bases = (self,) + self.__bases__
302
+ return self.__class__(name, bases,
303
+ {'__init_subclass__': _dp_init_subclass,
304
+ 'type': t,
305
+ '__type_class__': True})
306
+
307
+ if len(params) > 1:
308
+ raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1')
309
+
310
+ t = _DataPipeType(params[0])
311
+
312
+ if not t.issubtype(self.type):
313
+ raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]')
314
+
315
+ # Types are equal, fast path for inheritance
316
+ if self.type == t:
317
+ return self
318
+
319
+ name = self.__name__ + '[' + str(t) + ']'
320
+ bases = (self,) + self.__bases__
321
+
322
+ return self.__class__(name, bases,
323
+ {'__init_subclass__': _dp_init_subclass,
324
+ '__type_class__': True,
325
+ 'type': t})
326
+
327
+ # TODO: Fix isinstance bug
328
+ def _eq_(self, other):
329
+ if not isinstance(other, _DataPipeMeta):
330
+ return NotImplemented
331
+ if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type]
332
+ return self is other
333
+ return (self.__origin__ == other.__origin__ # type: ignore[has-type]
334
+ and self.type == other.type)
335
+
336
+ # TODO: Fix isinstance bug
337
+ def _hash_(self):
338
+ return hash((self.__name__, self.type))
339
+
340
+
341
+ class _IterDataPipeMeta(_DataPipeMeta):
342
+ r"""
343
+ Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`.
344
+
345
+ Add various functions for behaviors specific to `IterDataPipe`.
346
+ """
347
+
348
+ def __new__(cls, name, bases, namespace, **kwargs):
349
+
350
+ if 'reset' in namespace:
351
+ reset_func = namespace['reset']
352
+
353
+ @functools.wraps(reset_func)
354
+ def conditional_reset(*args, **kwargs):
355
+ r"""
356
+ Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`.
357
+
358
+ This allows a recently restored DataPipe to preserve its restored state during the initial `__iter__` call.
359
+ """
360
+ datapipe = args[0]
361
+ if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):
362
+ # Resetting `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have
363
+ # already begun iterating.
364
+ datapipe._number_of_samples_yielded = 0
365
+ datapipe._fast_forward_iterator = None
366
+ reset_func(*args, **kwargs)
367
+ datapipe._snapshot_state = _SnapshotState.Iterating
368
+
369
+ namespace['reset'] = conditional_reset
370
+
371
+ if '__iter__' in namespace:
372
+ hook_iterator(namespace)
373
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
374
+
375
+
376
+ def _dp_init_subclass(sub_cls, *args, **kwargs):
377
+ # Add function for datapipe instance to reinforce the type
378
+ sub_cls.reinforce_type = reinforce_type
379
+
380
+ # TODO:
381
+ # - add global switch for type checking at compile-time
382
+
383
+ # Ignore internal type class
384
+ if getattr(sub_cls, '__type_class__', False):
385
+ return
386
+
387
+ # Check if the string type is valid
388
+ if isinstance(sub_cls.type.param, ForwardRef):
389
+ base_globals = sys.modules[sub_cls.__module__].__dict__
390
+ try:
391
+ param = _eval_type(sub_cls.type.param, base_globals, locals())
392
+ sub_cls.type.param = param
393
+ except TypeError as e:
394
+ raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e
395
+
396
+ if '__iter__' in sub_cls.__dict__:
397
+ iter_fn = sub_cls.__dict__['__iter__']
398
+ hints = get_type_hints(iter_fn)
399
+ if 'return' in hints:
400
+ return_hint = hints['return']
401
+ # Plain Return Hint for Python 3.6
402
+ if return_hint == Iterator:
403
+ return
404
+ if not (hasattr(return_hint, '__origin__') and
405
+ (return_hint.__origin__ == Iterator or
406
+ return_hint.__origin__ == collections.abc.Iterator)):
407
+ raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}"
408
+ ", but found {}".format(sub_cls.__name__, _type_repr(hints['return'])))
409
+ data_type = return_hint.__args__[0]
410
+ if not issubtype(data_type, sub_cls.type.param):
411
+ raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}"
412
+ " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__))
413
+
414
+
415
+ def reinforce_type(self, expected_type):
416
+ r"""
417
+ Reinforce the type for the DataPipe instance.
418
+
419
+ The 'expected_type' is required to be a subtype of the original type
420
+ hint to restrict the type requirement of the DataPipe instance.
421
+ """
422
+ if isinstance(expected_type, tuple):
423
+ expected_type = Tuple[expected_type]
424
+ _type_check(expected_type, msg="'expected_type' must be a type")
425
+
426
+ if not issubtype(expected_type, self.type.param):
427
+ raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}")
428
+
429
+ self.type = _DataPipeType(expected_type)
430
+ return self
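
A minimal sketch of the `_DataPipeType` behavior defined above, assuming an installed torch where this private module is importable; the variable names are illustrative only:

    from typing import Generic, Tuple, TypeVar

    from torch.utils.data.datapipes._typing import _DataPipeType

    T_co = TypeVar('T_co', covariant=True)

    t_any = _DataPipeType(Generic[T_co])     # the default type, accepts any parameter
    t_pair = _DataPipeType(Tuple[int, int])

    assert t_pair.issubtype(t_any)               # Generic[T_co] matches everything
    assert t_pair.issubtype_of_instance((1, 2))  # runtime instance check
    print(t_pair)                                # typing.Tuple[int, int]
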
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ from torch.utils.data.datapipes.dataframe.dataframes import (
2
+ CaptureDataFrame, DFIterDataPipe,
3
+ )
4
+ from torch.utils.data.datapipes.dataframe.datapipes import (
5
+ DataFramesAsTuplesPipe,
6
+ )
7
+
8
+ __all__ = ['CaptureDataFrame', 'DFIterDataPipe', 'DataFramesAsTuplesPipe']
9
+
10
+ # Please keep this list sorted
11
+ assert __all__ == sorted(__all__)
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (472 Bytes).
 
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc ADDED
Binary file (3.6 kB).
 
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc ADDED
Binary file (15.3 kB).
 
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc ADDED
Binary file (4.55 kB).
 
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc ADDED
Binary file (1.05 kB).
 
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py ADDED
@@ -0,0 +1,125 @@
1
+ from typing import Any, Optional
2
+
3
+ _pandas: Any = None
4
+ _WITH_PANDAS: Optional[bool] = None
5
+
6
+
7
+ def _try_import_pandas() -> bool:
8
+ try:
9
+ import pandas # type: ignore[import]
10
+ global _pandas
11
+ _pandas = pandas
12
+ return True
13
+ except ImportError:
14
+ return False
15
+
16
+
17
+ # pandas is used only for prototyping; it will shortly be replaced with TorchArrow
18
+ def _with_pandas() -> bool:
19
+ global _WITH_PANDAS
20
+ if _WITH_PANDAS is None:
21
+ _WITH_PANDAS = _try_import_pandas()
22
+ return _WITH_PANDAS
23
+
24
+
25
+ class PandasWrapper:
26
+ @classmethod
27
+ def create_dataframe(cls, data, columns):
28
+ if not _with_pandas():
29
+ raise Exception("DataFrames prototype requires pandas to function")
30
+ return _pandas.DataFrame(data, columns=columns) # type: ignore[union-attr]
31
+
32
+ @classmethod
33
+ def is_dataframe(cls, data):
34
+ if not _with_pandas():
35
+ return False
36
+ return isinstance(data, _pandas.core.frame.DataFrame) # type: ignore[union-attr]
37
+
38
+ @classmethod
39
+ def is_column(cls, data):
40
+ if not _with_pandas():
41
+ return False
42
+ return isinstance(data, _pandas.core.series.Series) # type: ignore[union-attr]
43
+
44
+ @classmethod
45
+ def iterate(cls, data):
46
+ if not _with_pandas():
47
+ raise Exception("DataFrames prototype requires pandas to function")
48
+ yield from data.itertuples(index=False)
49
+
50
+ @classmethod
51
+ def concat(cls, buffer):
52
+ if not _with_pandas():
53
+ raise Exception("DataFrames prototype requires pandas to function")
54
+ return _pandas.concat(buffer) # type: ignore[union-attr]
55
+
56
+ @classmethod
57
+ def get_item(cls, data, idx):
58
+ if not _with_pandas():
59
+ raise Exception("DataFrames prototype requires pandas to function")
60
+ return data[idx: idx + 1]
61
+
62
+ @classmethod
63
+ def get_len(cls, df):
64
+ if not _with_pandas():
65
+ raise Exception("DataFrames prototype requires pandas to function")
66
+ return len(df.index)
67
+
68
+ @classmethod
69
+ def get_columns(cls, df):
70
+ if not _with_pandas():
71
+ raise Exception("DataFrames prototype requires pandas to function")
72
+ return list(df.columns.values.tolist())
73
+
74
+
75
+ # When you build your own implementation, just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class)
76
+ default_wrapper = PandasWrapper
77
+
78
+
79
+ def get_df_wrapper():
80
+ return default_wrapper
81
+
82
+
83
+ def set_df_wrapper(wrapper):
84
+ global default_wrapper
85
+ default_wrapper = wrapper
86
+
87
+
88
+ def create_dataframe(data, columns=None):
89
+ wrapper = get_df_wrapper()
90
+ return wrapper.create_dataframe(data, columns)
91
+
92
+
93
+ def is_dataframe(data):
94
+ wrapper = get_df_wrapper()
95
+ return wrapper.is_dataframe(data)
96
+
97
+
98
+ def get_columns(data):
99
+ wrapper = get_df_wrapper()
100
+ return wrapper.get_columns(data)
101
+
102
+
103
+ def is_column(data):
104
+ wrapper = get_df_wrapper()
105
+ return wrapper.is_column(data)
106
+
107
+
108
+ def concat(buffer):
109
+ wrapper = get_df_wrapper()
110
+ return wrapper.concat(buffer)
111
+
112
+
113
+ def iterate(data):
114
+ wrapper = get_df_wrapper()
115
+ return wrapper.iterate(data)
116
+
117
+
118
+ def get_item(data, idx):
119
+ wrapper = get_df_wrapper()
120
+ return wrapper.get_item(data, idx)
121
+
122
+
123
+ def get_len(df):
124
+ wrapper = get_df_wrapper()
125
+ return wrapper.get_len(df)
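
A short sketch of the wrapper swap suggested by the comment above, implementing only the methods actually exercised; `ListWrapper` is a hypothetical stand-in, not part of the library:

    from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper

    class ListWrapper:
        # Treats a list of dicts as the "dataframe" type.
        @classmethod
        def create_dataframe(cls, data, columns):
            return [dict(zip(columns, row)) for row in data]

        @classmethod
        def is_dataframe(cls, data):
            return isinstance(data, list) and all(isinstance(r, dict) for r in data)

        @classmethod
        def get_len(cls, df):
            return len(df)

    df_wrapper.set_df_wrapper(ListWrapper)
    df = df_wrapper.create_dataframe([(1, 2)], columns=["a", "b"])
    assert df_wrapper.is_dataframe(df) and df_wrapper.get_len(df) == 1
    df_wrapper.set_df_wrapper(df_wrapper.PandasWrapper)  # restore the default
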
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py ADDED
@@ -0,0 +1,433 @@
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from torch.utils.data.datapipes._decorator import functional_datapipe
4
+ from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
5
+
6
+ from torch.utils.data.datapipes.dataframe.structures import DataChunkDF
7
+
8
+ # TODO(VitalyFedyunin): Add error when two different traces get combined
9
+
10
+ __all__ = [
11
+ "Capture",
12
+ "CaptureA",
13
+ "CaptureAdd",
14
+ "CaptureCall",
15
+ "CaptureControl",
16
+ "CaptureDataFrame",
17
+ "CaptureDataFrameWithDataPipeOps",
18
+ "CaptureF",
19
+ "CaptureGetAttr",
20
+ "CaptureGetItem",
21
+ "CaptureInitial",
22
+ "CaptureLikeMock",
23
+ "CaptureMul",
24
+ "CaptureSetItem",
25
+ "CaptureSub",
26
+ "CaptureVariable",
27
+ "CaptureVariableAssign",
28
+ "DataFrameTracer",
29
+ "DataFrameTracedOps",
30
+ "disable_capture",
31
+ "get_val",
32
+ ]
33
+
34
+
35
+ def disable_capture():
36
+ CaptureControl.disabled = True
37
+
38
+
39
+ class CaptureControl:
40
+ disabled = False
41
+
42
+
43
+ class DataFrameTracedOps(DFIterDataPipe):
44
+ def __init__(self, source_datapipe, output_var):
45
+ self.source_datapipe = source_datapipe
46
+ self.output_var = output_var
47
+
48
+ def __iter__(self):
49
+ for item in self.source_datapipe:
50
+ yield self.output_var.apply_ops(item)
51
+
52
+
53
+ # TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registered functions
54
+ DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe',
55
+ 'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle']
56
+
57
+ UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding']
58
+
59
+
60
+ class Capture:
61
+ # TODO: All operations are shared across the entire InitialCapture; need to figure out what happens if we join two captures
62
+
63
+ def __init__(self, schema_df=None):
64
+ self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df}
65
+
66
+ def __str__(self):
67
+ return self._ops_str()
68
+
69
+ def _ops_str(self):
70
+ res = ""
71
+ for op in self.ctx['operations']:
72
+ if len(res) > 0:
73
+ res += "\n"
74
+ res += str(op)
75
+ return res
76
+
77
+ def __getstate__(self):
78
+ # TODO(VitalyFedyunin): Currently can't pickle (why?)
79
+ self.ctx['schema_df'] = None
80
+ for var in self.ctx['variables']:
81
+ var.calculated_value = None
82
+ state = {}
83
+ for item in self.__dict__:
84
+ state[item] = getattr(self, item)
85
+ return state
86
+
87
+ def __setstate__(self, state):
88
+ for k, v in state.items():
89
+ setattr(self, k, v)
90
+
91
+ def __getattr__(self, attrname):
92
+ if attrname == 'kwarg' or attrname == 'kwargs':
93
+ raise Exception('no kwargs!')
94
+ if attrname in ['__deepcopy__']:
95
+ raise AttributeError()
96
+ result = CaptureGetAttr(self, attrname, ctx=self.ctx)
97
+ return result
98
+
99
+ def __getitem__(self, key):
100
+ return CaptureGetItem(self, key, ctx=self.ctx)
101
+
102
+ def __setitem__(self, key, value):
103
+ self.ctx['operations'].append(
104
+ CaptureSetItem(self, key, value, ctx=self.ctx))
105
+
106
+ def __add__(self, add_val):
107
+ res = CaptureAdd(self, add_val, ctx=self.ctx)
108
+ var = CaptureVariable(res, ctx=self.ctx)
109
+ self.ctx['operations'].append(
110
+ CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
111
+ return var
112
+
113
+ def __sub__(self, add_val):
114
+ res = CaptureSub(self, add_val, ctx=self.ctx)
115
+ var = CaptureVariable(res, ctx=self.ctx)
116
+ self.ctx['operations'].append(
117
+ CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
118
+ return var
119
+
120
+ def __mul__(self, add_val):
121
+ res = CaptureMul(self, add_val, ctx=self.ctx)
122
+ var = CaptureVariable(res, ctx=self.ctx)
123
+ t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
124
+ self.ctx['operations'].append(t)
125
+ return var
126
+
127
+ def _is_context_empty(self):
128
+ return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0
129
+
130
+ def apply_ops_2(self, dataframe):
131
+ # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
132
+ self.ctx['variables'][0].calculated_value = dataframe
133
+ for op in self.ctx['operations']:
134
+ op.execute()
135
+
136
+ @property
137
+ def columns(self):
138
+ self.apply_ops_2(self.ctx['schema_df'])
139
+ value = self.execute()
140
+ return value.columns
141
+
142
+ # TODO(VitalyFedyunin): Add tests
143
+ # TODO(VitalyFedyunin): Need to join contexts if one of them is empty because we used capture
144
+
145
+ def __call__(self, *args, **kwargs):
146
+ # TODO: Check if args or kwargs have more than one different context
147
+ if self._is_context_empty():
148
+ # TODO: Allow CaptureA to take context from mock
149
+ for arg in args:
150
+ if isinstance(arg, Capture) and not arg._is_context_empty():
151
+ self.ctx = arg.ctx
152
+ break
153
+ if self._is_context_empty():
154
+ for k, v in kwargs.items():
155
+ if isinstance(k, Capture) and not k._is_context_empty():
156
+ self.ctx = k.ctx
157
+ break
158
+ if isinstance(v, Capture) and not v._is_context_empty():
159
+ self.ctx = v.ctx
160
+ break
161
+
162
+ res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
163
+ var = CaptureVariable(None, ctx=self.ctx)
164
+ t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
165
+ self.ctx['operations'].append(t)
166
+ return var
167
+
168
+
169
+ class CaptureF(Capture):
170
+ def __init__(self, ctx=None, **kwargs):
171
+ if ctx is None:
172
+ self.ctx = {'operations': [], 'variables': []}
173
+ else:
174
+ self.ctx = ctx
175
+ self.kwargs = kwargs
176
+
177
+
178
+ class CaptureA(CaptureF):
179
+ def __str__(self):
180
+ return f"{self.kwargs['name']}"
181
+
182
+ def execute(self):
183
+ value = self.kwargs['real_attribute']
184
+ return value
185
+
186
+
187
+ class CaptureLikeMock:
188
+ def __init__(self, name):
189
+ import unittest.mock as mock
190
+ # TODO(VitalyFedyunin): Do not use a private function here; copy our own implementation instead.
191
+ get_target, attribute = mock._get_target(name) # type: ignore[attr-defined]
192
+ self.get_target = get_target
193
+ self.attribute = attribute
194
+ self.name = name
195
+
196
+ def __enter__(self):
197
+ self.save = getattr(self.get_target(), self.attribute)
198
+ capt = CaptureA(name=self.name, real_attribute=self.save)
199
+ setattr(self.get_target(), self.attribute, capt)
200
+
201
+ def __exit__(self, *exc_info):
202
+ setattr(self.get_target(), self.attribute, self.save)
203
+
204
+
205
+ class CaptureCall(Capture):
206
+
207
+ def __init__(self, callable, ctx=None, **kwargs):
208
+ if ctx is None:
209
+ self.ctx = {'operations': [], 'variables': []}
210
+ else:
211
+ self.ctx = ctx
212
+ self.kwargs = kwargs
213
+ self.callable = callable
214
+
215
+ def __str__(self):
216
+ return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs)
217
+
218
+ def execute(self):
219
+
220
+ # TODO(VitalyFedyunin): Execute kwargs and maybe nested structures
221
+ executed_args = []
222
+ for arg in self.kwargs['args']:
223
+ if isinstance(arg, Capture):
224
+ executed_args.append(arg.execute())
225
+ else:
226
+ executed_args.append(arg)
227
+ left = get_val(self.callable)
228
+ return left(*executed_args, **self.kwargs['kwargs'])
229
+
230
+
231
+ class CaptureVariableAssign(CaptureF):
232
+ def __str__(self):
233
+ variable = self.kwargs['variable']
234
+ value = self.kwargs['value']
235
+ return f"{variable} = {value}"
236
+
237
+ def execute(self):
238
+ self.kwargs['variable'].calculated_value = self.kwargs['value'].execute()
239
+
240
+
241
+ class CaptureVariable(Capture):
242
+ # TODO(VitalyFedyunin): This should be atomic and thread safe
243
+ names_idx = 0
244
+
245
+ def __init__(self, value, ctx):
246
+ if CaptureControl.disabled:
247
+ raise Exception('Attempting to create capture variable with capture off')
248
+ self.ctx = ctx
249
+ self.value = value
250
+ self.name = f'var_{CaptureVariable.names_idx}'
251
+ CaptureVariable.names_idx += 1
252
+ self.ctx['variables'].append(self)
253
+
254
+ def __str__(self):
255
+ return self.name
256
+
257
+ def execute(self):
258
+ return self.calculated_value
259
+
260
+ def apply_ops(self, dataframe):
261
+ # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
262
+ self.ctx['variables'][0].calculated_value = dataframe
263
+ for op in self.ctx['operations']:
264
+ op.execute()
265
+ return self.calculated_value
266
+
267
+
268
+ class CaptureGetItem(Capture):
269
+ def __init__(self, left, key, ctx):
270
+ self.ctx = ctx
271
+ self.left = left
272
+ self.key = key
273
+
274
+ def __str__(self):
275
+ return f"{self.left}[{get_val(self.key)}]"
276
+
277
+ def execute(self):
278
+ left = self.left.execute()
279
+ return left[self.key]
280
+
281
+
282
+ class CaptureSetItem(Capture):
283
+ def __init__(self, left, key, value, ctx):
284
+ self.ctx = ctx
285
+ self.left = left
286
+ self.key = key
287
+ self.value = value
288
+
289
+ def __str__(self):
290
+ return f"{self.left}[{get_val(self.key)}] = {self.value}"
291
+
292
+ def execute(self):
293
+ left = self.left.execute()
294
+ value = self.value.execute()
295
+ left[self.key] = value
296
+
297
+
298
+ class CaptureAdd(Capture):
299
+ def __init__(self, left, right, ctx):
300
+ self.ctx = ctx
301
+ self.left = left
302
+ self.right = right
303
+
304
+ def __str__(self):
305
+ return f"{self.left} + {self.right}"
306
+
307
+ def execute(self):
308
+ return get_val(self.left) + get_val(self.right)
309
+
310
+
311
+ class CaptureMul(Capture):
312
+ def __init__(self, left, right, ctx):
313
+ self.ctx = ctx
314
+ self.left = left
315
+ self.right = right
316
+
317
+ def __str__(self):
318
+ return f"{self.left} * {self.right}"
319
+
320
+ def execute(self):
321
+ return get_val(self.left) * get_val(self.right)
322
+
323
+
324
+ class CaptureSub(Capture):
325
+ def __init__(self, left, right, ctx):
326
+ self.ctx = ctx
327
+ self.left = left
328
+ self.right = right
329
+
330
+ def __str__(self):
331
+ return f"{self.left} - {self.right}"
332
+
333
+ def execute(self):
334
+ return get_val(self.left) - get_val(self.right)
335
+
336
+
337
+ class CaptureGetAttr(Capture):
338
+ def __init__(self, src, name, ctx):
339
+ self.ctx = ctx
340
+ self.src = src
341
+ self.name = name
342
+
343
+ def __str__(self):
344
+ return f"{self.src}.{self.name}"
345
+
346
+ def execute(self):
347
+ val = get_val(self.src)
348
+ return getattr(val, self.name)
349
+
350
+
351
+ def get_val(capture):
352
+ if isinstance(capture, Capture):
353
+ return capture.execute()
354
+ elif isinstance(capture, str):
355
+ return f'"{capture}"'
356
+ else:
357
+ return capture
358
+
359
+
360
+ class CaptureInitial(CaptureVariable):
361
+ def __init__(self, schema_df=None):
362
+ new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df}
363
+ super().__init__(None, new_ctx)
364
+ self.name = f'input_{self.name}'
365
+
366
+
367
+ class CaptureDataFrame(CaptureInitial):
368
+ pass
369
+
370
+
371
+ class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):
372
+ def as_datapipe(self):
373
+ return DataFrameTracedOps(
374
+ self.ctx['variables'][0].source_datapipe, self)
375
+
376
+ def raw_iterator(self):
377
+ return self.as_datapipe().__iter__()
378
+
379
+ def __iter__(self):
380
+ return iter(self._dataframes_as_tuples())
381
+
382
+ def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF):
383
+ dp = self._dataframes_per_row()._dataframes_concat(batch_size)
384
+ dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class)
385
+ dp._dp_contains_dataframe = True
386
+ return dp
387
+
388
+ def groupby(self,
389
+ group_key_fn,
390
+ *,
391
+ buffer_size=10000,
392
+ group_size=None,
393
+ guaranteed_group_size=None,
394
+ drop_remaining=False):
395
+ dp = self._dataframes_per_row()
396
+ dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size,
397
+ guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining)
398
+ return dp
399
+
400
+ def shuffle(self, *args, **kwargs):
401
+ return self._dataframes_shuffle(*args, **kwargs)
402
+
403
+ def filter(self, *args, **kwargs):
404
+ return self._dataframes_filter(*args, **kwargs)
405
+
406
+ def collate(self, *args, **kwargs):
407
+ raise Exception("Can't collate unbatched DataFrames stream")
408
+
409
+ def __getattr__(self, attrname): # ?
410
+ if attrname in UNIMPLEMENTED_ATTR:
411
+ raise AttributeError(f"Attempting to get {attrname}")
412
+ if attrname in DATAPIPES_OPS:
413
+ return (self.as_datapipe()).__getattr__(attrname)
414
+ return super().__getattr__(attrname)
415
+
416
+
417
+ @functional_datapipe('trace_as_dataframe')
418
+ class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe): # type: ignore[misc]
419
+ source_datapipe: Optional[Any] = None
420
+
421
+ # TODO(VitalyFedyunin): Must implement all special functions of datapipes
422
+
423
+ def set_shuffle_settings(self, *args, **kwargs):
424
+ pass
425
+
426
+ def is_shardable(self):
427
+ return False
428
+
429
+ def __init__(self, source_datapipe, schema_df=None):
430
+ self.source_datapipe = source_datapipe
431
+ if schema_df is None:
432
+ schema_df = next(iter(self.source_datapipe))
433
+ super().__init__(schema_df=schema_df)
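
A hedged sketch of how the capture machinery above is meant to be driven, assuming pandas is installed; this is a prototype, so the exact behavior may change:

    import pandas as pd

    from torch.utils.data.datapipes.iter import IterableWrapper

    frames = IterableWrapper([pd.DataFrame({"x": [1, 2], "y": [3, 4]})])
    dp = frames.trace_as_dataframe()  # DataFrameTracer records operations instead of running them
    dp["z"] = dp["x"] * dp["y"]       # captured as CaptureMul + CaptureSetItem, not executed yet
    for row in dp:                    # ops replay per DataFrame; rows come out as tuples
        print(row)                    # e.g. Pandas(x=1, y=3, z=3)
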
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py ADDED
@@ -0,0 +1,131 @@
1
+ import random
2
+
3
+ from torch.utils.data.datapipes._decorator import functional_datapipe
4
+ from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
5
+
6
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
7
+
8
+ __all__ = [
9
+ "ConcatDataFramesPipe",
10
+ "DataFramesAsTuplesPipe",
11
+ "ExampleAggregateAsDataFrames",
12
+ "FilterDataFramesPipe",
13
+ "PerRowDataFramesPipe",
14
+ "ShuffleDataFramesPipe",
15
+ ]
16
+
17
+
18
+ @functional_datapipe('_dataframes_as_tuples')
19
+ class DataFramesAsTuplesPipe(IterDataPipe):
20
+ def __init__(self, source_datapipe):
21
+ self.source_datapipe = source_datapipe
22
+
23
+ def __iter__(self):
24
+ for df in self.source_datapipe:
25
+ # for record in df.to_records(index=False):
26
+ yield from df_wrapper.iterate(df)
27
+
28
+
29
+ @functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True)
30
+ class PerRowDataFramesPipe(DFIterDataPipe):
31
+ def __init__(self, source_datapipe):
32
+ self.source_datapipe = source_datapipe
33
+
34
+ def __iter__(self):
35
+ for df in self.source_datapipe:
36
+ # TODO(VitalyFedyunin): Replace with a TorchArrow-only API, as we are dropping pandas as a follow-up
37
+ for i in range(len(df)):
38
+ yield df[i:i + 1]
39
+
40
+
41
+ @functional_datapipe('_dataframes_concat', enable_df_api_tracing=True)
42
+ class ConcatDataFramesPipe(DFIterDataPipe):
43
+ def __init__(self, source_datapipe, batch=3):
44
+ self.source_datapipe = source_datapipe
45
+ self.n_batch = batch
46
+
47
+ def __iter__(self):
48
+ buffer = []
49
+ for df in self.source_datapipe:
50
+ buffer.append(df)
51
+ if len(buffer) == self.n_batch:
52
+ yield df_wrapper.concat(buffer)
53
+ buffer = []
54
+ if len(buffer):
55
+ yield df_wrapper.concat(buffer)
56
+
57
+
58
+ @functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True)
59
+ class ShuffleDataFramesPipe(DFIterDataPipe):
60
+ def __init__(self, source_datapipe):
61
+ self.source_datapipe = source_datapipe
62
+
63
+ def __iter__(self):
64
+ size = None
65
+ all_buffer = []
66
+ for df in self.source_datapipe:
67
+ if size is None:
68
+ size = df_wrapper.get_len(df)
69
+ for i in range(df_wrapper.get_len(df)):
70
+ all_buffer.append(df_wrapper.get_item(df, i))
71
+ random.shuffle(all_buffer)
72
+ buffer = []
73
+ for df in all_buffer:
74
+ buffer.append(df)
75
+ if len(buffer) == size:
76
+ yield df_wrapper.concat(buffer)
77
+ buffer = []
78
+ if len(buffer):
79
+ yield df_wrapper.concat(buffer)
80
+
81
+
82
+ @functional_datapipe('_dataframes_filter', enable_df_api_tracing=True)
83
+ class FilterDataFramesPipe(DFIterDataPipe):
84
+ def __init__(self, source_datapipe, filter_fn):
85
+ self.source_datapipe = source_datapipe
86
+ self.filter_fn = filter_fn
87
+
88
+ def __iter__(self):
89
+ size = None
90
+ all_buffer = []
91
+ filter_res = []
92
+ for df in self.source_datapipe:
93
+ if size is None:
94
+ size = len(df.index)
95
+ for i in range(len(df.index)):
96
+ all_buffer.append(df[i:i + 1])
97
+ filter_res.append(self.filter_fn(df.iloc[i]))
98
+
99
+ buffer = []
100
+ for df, res in zip(all_buffer, filter_res):
101
+ if res:
102
+ buffer.append(df)
103
+ if len(buffer) == size:
104
+ yield df_wrapper.concat(buffer)
105
+ buffer = []
106
+ if len(buffer):
107
+ yield df_wrapper.concat(buffer)
108
+
109
+
110
+ @functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True)
111
+ class ExampleAggregateAsDataFrames(DFIterDataPipe):
112
+ def __init__(self, source_datapipe, dataframe_size=10, columns=None):
113
+ self.source_datapipe = source_datapipe
114
+ self.columns = columns
115
+ self.dataframe_size = dataframe_size
116
+
117
+ def _as_list(self, item):
118
+ try:
119
+ return list(item)
120
+ except Exception: # TODO(VitalyFedyunin): Replace with better iterable exception
121
+ return [item]
122
+
123
+ def __iter__(self):
124
+ aggregate = []
125
+ for item in self.source_datapipe:
126
+ aggregate.append(self._as_list(item))
127
+ if len(aggregate) == self.dataframe_size:
128
+ yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
129
+ aggregate = []
130
+ if len(aggregate) > 0:
131
+ yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
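
A hedged sketch of `ExampleAggregateAsDataFrames` above, assuming pandas is installed. Because `_to_dataframes_pipe` is registered with `enable_df_api_tracing=True`, the result is traced, so `raw_iterator` is used to see the frames themselves:

    from torch.utils.data.datapipes.iter import IterableWrapper

    rows = IterableWrapper([(i, i * i) for i in range(5)])
    df_dp = rows._to_dataframes_pipe(dataframe_size=2, columns=["x", "x_sq"])
    # Frames of 2, 2, and 1 rows: the final partial aggregate is flushed at the end.
    for df in df_dp.raw_iterator():
        print(len(df.index))
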
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py ADDED
@@ -0,0 +1,18 @@
1
+ from torch.utils.data.datapipes.datapipe import DataChunk
2
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
3
+
4
+ __all__ = ["DataChunkDF", ]
5
+
6
+
7
+ class DataChunkDF(DataChunk):
8
+ """DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`."""
9
+
10
+ def __iter__(self):
11
+ for df in self.items:
12
+ yield from df_wrapper.iterate(df)
13
+
14
+ def __len__(self):
15
+ total_len = 0
16
+ for df in self.items:
17
+ total_len += df_wrapper.get_len(df)
18
+ return total_len
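
A small sketch of `DataChunkDF` semantics, assuming pandas is installed: `len` counts rows across all contained frames, and iteration unpacks individual rows:

    import pandas as pd

    from torch.utils.data.datapipes.dataframe.structures import DataChunkDF

    chunk = DataChunkDF([pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [3]})])
    assert len(chunk) == 3        # rows summed across both frames
    assert len(list(chunk)) == 3  # __iter__ yields per-row tuples via the wrapper
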
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py ADDED
@@ -0,0 +1,404 @@
1
+ import functools
2
+ import pickle
3
+ from typing import Dict, Callable, Optional, TypeVar, Generic, Iterator
4
+
5
+ from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
6
+ from torch.utils.data.datapipes._hook_iterator import _SnapshotState
7
+ from torch.utils.data.datapipes.utils.common import (
8
+ _deprecation_warning,
9
+ _iter_deprecated_functional_names,
10
+ _map_deprecated_functional_names,
11
+ )
12
+ from torch.utils.data.dataset import Dataset, IterableDataset
13
+ from torch.utils._import_utils import import_dill
14
+
15
+ dill = import_dill()
16
+ HAS_DILL = dill is not None
17
+
18
+ __all__ = [
19
+ "DataChunk",
20
+ "DFIterDataPipe",
21
+ "IterDataPipe",
22
+ "MapDataPipe",
23
+ ]
24
+
25
+ T = TypeVar('T')
26
+ T_co = TypeVar('T_co', covariant=True)
27
+
28
+ UNTRACABLE_DATAFRAME_PIPES = ['batch', # As it returns DataChunks
29
+ 'groupby', # As it returns DataChunks
30
+ '_dataframes_as_tuples', # As it unpacks DF
31
+ 'trace_as_dataframe', # As it is used to mark DF for tracing
32
+ ]
33
+
34
+
35
+ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta):
36
+ r"""
37
+ Iterable-style DataPipe.
38
+
39
+ All DataPipes that represent an iterable of data samples should subclass this.
40
+ This style of DataPipes is particularly useful when data come from a stream, or
41
+ when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its
42
+ elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``.
43
+
44
+ All subclasses should overwrite :meth:`__iter__`, which would return an
45
+ iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its
46
+ method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should
47
+ override ``reset()`` if necessary. The common usages include resetting buffers, pointers,
48
+ and various state variables within the custom ``IterDataPipe``.
49
+
50
+ Note:
51
+ Only `one` iterator can be valid for each ``IterDataPipe`` at a time,
52
+ and the creation a second iterator will invalidate the first one. This constraint is necessary because
53
+ some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators.
54
+ The code example below presents details on how this constraint looks in practice.
55
+ If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_.
56
+
57
+ These DataPipes can be invoked in two ways, using the class constructor or applying their
58
+ functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes).
59
+ You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple
60
+ operations in succession.
61
+
62
+ .. _GitHub IterDataPipe Single Iterator Issue:
63
+ https://github.com/pytorch/data/issues/45
64
+
65
+ Note:
66
+ When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
67
+ item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader`
68
+ iterator. When :attr:`num_workers > 0`, each worker process will have a
69
+ different copy of the DataPipe object, so it is often desired to configure
70
+ each copy independently to avoid having duplicate data returned from the
71
+ workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
72
+ process, returns information about the worker. It can be used in either the
73
+ dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
74
+ :attr:`worker_init_fn` option to modify each copy's behavior.
75
+
76
+ Examples:
77
+ General Usage:
78
+ >>> # xdoctest: +SKIP
79
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
80
+ >>> dp = IterableWrapper(range(10))
81
+ >>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor
82
+ >>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended)
83
+ >>> list(map_dp_1)
84
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
85
+ >>> list(map_dp_2)
86
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
87
+ >>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0)
88
+ >>> list(filter_dp)
89
+ [2, 4, 6, 8, 10]
90
+ Single Iterator Constraint Example:
91
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
92
+ >>> source_dp = IterableWrapper(range(10))
93
+ >>> it1 = iter(source_dp)
94
+ >>> list(it1)
95
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
96
+ >>> it1 = iter(source_dp)
97
+ >>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1`
98
+ >>> next(it2)
99
+ 0
100
+ >>> next(it1) # Further usage of `it1` will raise a `RuntimeError`
101
+ """
102
+
103
+ functions: Dict[str, Callable] = {}
104
+ reduce_ex_hook: Optional[Callable] = None
105
+ getstate_hook: Optional[Callable] = None
106
+ str_hook: Optional[Callable] = None
107
+ repr_hook: Optional[Callable] = None
108
+ _valid_iterator_id: Optional[int] = None
109
+ _number_of_samples_yielded: int = 0
110
+ _snapshot_state: _SnapshotState = _SnapshotState.NotStarted
111
+ _fast_forward_iterator: Optional[Iterator] = None
112
+
113
+ def __iter__(self) -> Iterator[T_co]:
114
+ return self
115
+
116
+ def __getattr__(self, attribute_name):
117
+ if attribute_name in IterDataPipe.functions:
118
+ if attribute_name in _iter_deprecated_functional_names:
119
+ kwargs = _iter_deprecated_functional_names[attribute_name]
120
+ _deprecation_warning(**kwargs)
121
+ f = IterDataPipe.functions[attribute_name]
122
+ function = functools.partial(f, self)
123
+ functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
124
+ return function
125
+ else:
126
+ raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}")
127
+
128
+ @classmethod
129
+ def register_function(cls, function_name, function):
130
+ cls.functions[function_name] = function
131
+
132
+ @classmethod
133
+ def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False):
134
+ if function_name in cls.functions:
135
+ raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken")
136
+
137
+ def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs):
138
+ result_pipe = cls(source_dp, *args, **kwargs)
139
+ if isinstance(result_pipe, IterDataPipe):
140
+ if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe):
141
+ if function_name not in UNTRACABLE_DATAFRAME_PIPES:
142
+ result_pipe = result_pipe.trace_as_dataframe()
143
+
144
+ return result_pipe
145
+
146
+ function = functools.partial(
147
+ class_function, cls_to_register, enable_df_api_tracing
148
+ )
149
+ functools.update_wrapper(
150
+ wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
151
+ )
152
+ cls.functions[function_name] = function
153
+
154
+ def __getstate__(self):
155
+ """
156
+ Serialize `lambda` functions when `dill` is available.
157
+
158
+ If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
159
+ `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
160
+ """
161
+ state = self.__dict__
162
+ if IterDataPipe.getstate_hook is not None:
163
+ return IterDataPipe.getstate_hook(state)
164
+ return state
165
+
166
+ def __reduce_ex__(self, *args, **kwargs):
167
+ if IterDataPipe.reduce_ex_hook is not None:
168
+ try:
169
+ return IterDataPipe.reduce_ex_hook(self)
170
+ except NotImplementedError:
171
+ pass
172
+ return super().__reduce_ex__(*args, **kwargs)
173
+
174
+ @classmethod
175
+ def set_getstate_hook(cls, hook_fn):
176
+ if IterDataPipe.getstate_hook is not None and hook_fn is not None:
177
+ raise Exception("Attempt to override existing getstate_hook")
178
+ IterDataPipe.getstate_hook = hook_fn
179
+
180
+ @classmethod
181
+ def set_reduce_ex_hook(cls, hook_fn):
182
+ if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None:
183
+ raise Exception("Attempt to override existing reduce_ex_hook")
184
+ IterDataPipe.reduce_ex_hook = hook_fn
185
+
186
+ def __repr__(self):
187
+ if self.repr_hook is not None:
188
+ return self.repr_hook(self)
189
+ # Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
190
+ return str(self.__class__.__qualname__)
191
+
192
+ def __str__(self):
193
+ if self.str_hook is not None:
194
+ return self.str_hook(self)
195
+ # Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
196
+ return str(self.__class__.__qualname__)
197
+
198
+ def __dir__(self):
199
+ # for auto-completion in a REPL (e.g. Jupyter notebook)
200
+ return list(super().__dir__()) + list(self.functions.keys())
201
+
202
+ def reset(self) -> None:
203
+ r"""
204
+ Reset the `IterDataPipe` to the initial state.
205
+
206
+ By default, no-op. For subclasses of `IterDataPipe`, depending on their functionalities,
207
+ they may want to override this method with implementations that
208
+ may clear the buffers and reset pointers of the DataPipe.
209
+ The `reset` method is always called when `__iter__` is called as part of `hook_iterator`.
210
+ """
211
+ pass
212
+
213
+
214
+ class DFIterDataPipe(IterDataPipe):
215
+ def _is_dfpipe(self):
216
+ return True
217
+
218
+
219
+ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta):
220
+ r"""
221
+ Map-style DataPipe.
222
+
223
+ All datasets that represent a map from keys to data samples should subclass this.
224
+ Subclasses should overwrite :meth:`__getitem__`, supporting fetching a
225
+ data sample for a given, unique key. Subclasses can also optionally overwrite
226
+ :meth:`__len__`, which is expected to return the size of the dataset by many
227
+ :class:`~torch.utils.data.Sampler` implementations and the default options
228
+ of :class:`~torch.utils.data.DataLoader`.
229
+
230
+ These DataPipes can be invoked in two ways, using the class constructor or applying their
231
+ functional form onto an existing `MapDataPipe` (recommend, available to most but not all DataPipes).
232
+
233
+ Note:
234
+ :class:`~torch.utils.data.DataLoader` by default constructs an index
235
+ sampler that yields integral indices. To make it work with a map-style
236
+ DataPipe with non-integral indices/keys, a custom sampler must be provided.
237
+
238
+ Example:
239
+ >>> # xdoctest: +SKIP
240
+ >>> from torchdata.datapipes.map import SequenceWrapper, Mapper
241
+ >>> dp = SequenceWrapper(range(10))
242
+ >>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended)
243
+ >>> list(map_dp_1)
244
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
245
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor
246
+ >>> list(map_dp_2)
247
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
248
+ >>> batch_dp = map_dp_1.batch(batch_size=2)
249
+ >>> list(batch_dp)
250
+ [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
251
+ """
252
+
253
+ functions: Dict[str, Callable] = {}
254
+ reduce_ex_hook: Optional[Callable] = None
255
+ getstate_hook: Optional[Callable] = None
256
+ str_hook: Optional[Callable] = None
257
+ repr_hook: Optional[Callable] = None
258
+
259
+ def __getattr__(self, attribute_name):
260
+ if attribute_name in MapDataPipe.functions:
261
+ if attribute_name in _map_deprecated_functional_names:
262
+ kwargs = _map_deprecated_functional_names[attribute_name]
263
+ _deprecation_warning(**kwargs)
264
+ f = MapDataPipe.functions[attribute_name]
265
+ function = functools.partial(f, self)
266
+ functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
267
+ return function
268
+ else:
269
+ raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}")
270
+
271
+ @classmethod
272
+ def register_function(cls, function_name, function):
273
+ cls.functions[function_name] = function
274
+
275
+ @classmethod
276
+ def register_datapipe_as_function(cls, function_name, cls_to_register):
277
+ if function_name in cls.functions:
278
+ raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken")
279
+
280
+ def class_function(cls, source_dp, *args, **kwargs):
281
+ result_pipe = cls(source_dp, *args, **kwargs)
282
+ return result_pipe
283
+
284
+ function = functools.partial(class_function, cls_to_register)
285
+ functools.update_wrapper(
286
+ wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
287
+ )
288
+ cls.functions[function_name] = function
289
+
290
+ def __getstate__(self):
291
+ """
292
+ Serialize `lambda` functions when `dill` is available.
293
+
294
+ If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
295
+ `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
296
+ """
297
+ state = self.__dict__
298
+ if MapDataPipe.getstate_hook is not None:
299
+ return MapDataPipe.getstate_hook(state)
300
+ return state
301
+
302
+ def __reduce_ex__(self, *args, **kwargs):
303
+ if MapDataPipe.reduce_ex_hook is not None:
304
+ try:
305
+ return MapDataPipe.reduce_ex_hook(self)
306
+ except NotImplementedError:
307
+ pass
308
+ return super().__reduce_ex__(*args, **kwargs)
309
+
310
+ @classmethod
311
+ def set_getstate_hook(cls, hook_fn):
312
+ if MapDataPipe.getstate_hook is not None and hook_fn is not None:
313
+ raise Exception("Attempt to override existing getstate_hook")
314
+ MapDataPipe.getstate_hook = hook_fn
315
+
316
+ @classmethod
317
+ def set_reduce_ex_hook(cls, hook_fn):
318
+ if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None:
319
+ raise Exception("Attempt to override existing reduce_ex_hook")
320
+ MapDataPipe.reduce_ex_hook = hook_fn
321
+
322
+ def __repr__(self):
323
+ if self.repr_hook is not None:
324
+ return self.repr_hook(self)
325
+ # Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
326
+ return str(self.__class__.__qualname__)
327
+
328
+ def __str__(self):
329
+ if self.str_hook is not None:
330
+ return self.str_hook(self)
331
+ # Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
332
+ return str(self.__class__.__qualname__)
333
+
334
+ def __dir__(self):
335
+ # for auto-completion in a REPL (e.g. Jupyter notebook)
336
+ return list(super().__dir__()) + list(self.functions.keys())
337
+
338
+
339
+
340
+ class _DataPipeSerializationWrapper:
341
+ def __init__(self, datapipe):
342
+ self._datapipe = datapipe
343
+
344
+ def __getstate__(self):
345
+ use_dill = False
346
+ try:
347
+ value = pickle.dumps(self._datapipe)
348
+ except Exception:
349
+ if HAS_DILL:
350
+ value = dill.dumps(self._datapipe)
351
+ use_dill = True
352
+ else:
353
+ raise
354
+ return (value, use_dill)
355
+
356
+ def __setstate__(self, state):
357
+ value, use_dill = state
358
+ if use_dill:
359
+ self._datapipe = dill.loads(value)
360
+ else:
361
+ self._datapipe = pickle.loads(value)
362
+
363
+ def __len__(self):
364
+ try:
365
+ return len(self._datapipe)
366
+ except Exception as e:
367
+ raise TypeError(
368
+ f"{type(self).__name__} instance doesn't have valid length"
369
+ ) from e
370
+
371
+
372
+ class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
373
+ def __init__(self, datapipe: IterDataPipe[T_co]):
374
+ super().__init__(datapipe)
375
+ self._datapipe_iter: Optional[Iterator[T_co]] = None
376
+
377
+ def __iter__(self) -> "_IterDataPipeSerializationWrapper":
378
+ self._datapipe_iter = iter(self._datapipe)
379
+ return self
380
+
381
+ def __next__(self) -> T_co: # type: ignore[type-var]
382
+ assert self._datapipe_iter is not None
383
+ return next(self._datapipe_iter)
384
+
385
+
386
+ class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
387
+ def __getitem__(self, idx):
388
+ return self._datapipe[idx]
389
+
390
+
391
+ class DataChunk(list, Generic[T]):
392
+ def __init__(self, items):
393
+ super().__init__(items)
394
+ self.items = items
395
+
396
+ def as_str(self, indent=''):
397
+ res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
398
+ return res
399
+
400
+ def __iter__(self) -> Iterator[T]:
401
+ yield from super().__iter__()
402
+
403
+ def raw_iterator(self) -> T: # type: ignore[misc]
404
+ yield from self.items
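
A hedged sketch of the functional-form registration performed by `register_datapipe_as_function` above; `repeat_each` and `RepeaterIterDataPipe` are hypothetical names, not built-ins:

    from typing import Iterator

    from torch.utils.data import IterDataPipe
    from torch.utils.data.datapipes._decorator import functional_datapipe
    from torch.utils.data.datapipes.iter import IterableWrapper

    @functional_datapipe("repeat_each")
    class RepeaterIterDataPipe(IterDataPipe):
        def __init__(self, source_datapipe, times: int = 2) -> None:
            self.source_datapipe = source_datapipe
            self.times = times

        def __iter__(self) -> Iterator:
            for item in self.source_datapipe:
                for _ in range(self.times):
                    yield item

    # The decorator exposes the class as a chainable method on every IterDataPipe.
    assert list(IterableWrapper([1, 2]).repeat_each(times=2)) == [1, 1, 2, 2]
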
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi ADDED
@@ -0,0 +1,689 @@
1
+ # This base template ("datapipe.pyi.in") is generated from mypy stubgen with minimal editing for code injection
2
+ # The output file will be "datapipe.pyi". This is executed as part of torch/CMakeLists.txt
3
+ # Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other
4
+ # classes/objects here, even though we are not injecting extra code into them at the moment.
5
+
6
+ from typing import Any, Callable, Dict, Generic, Iterator, List, Literal, Optional, TypeVar, Union
7
+
8
+ from torch.utils.data import Dataset, default_collate, IterableDataset
9
+ from torch.utils.data.datapipes._hook_iterator import _SnapshotState
10
+ from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
11
+
12
+ T_co = TypeVar("T_co", covariant=True)
13
+ T = TypeVar("T")
14
+ UNTRACABLE_DATAFRAME_PIPES: Any
15
+
16
+ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta):
17
+ functions: Dict[str, Callable] = ...
18
+ reduce_ex_hook: Optional[Callable] = ...
19
+ getstate_hook: Optional[Callable] = ...
20
+ str_hook: Optional[Callable] = ...
21
+ repr_hook: Optional[Callable] = ...
22
+ def __getattr__(self, attribute_name: Any): ...
23
+ @classmethod
24
+ def register_function(cls, function_name: Any, function: Any) -> None: ...
25
+ @classmethod
26
+ def register_datapipe_as_function(
27
+ cls,
28
+ function_name: Any,
29
+ cls_to_register: Any,
30
+ ): ...
31
+ def __getstate__(self): ...
32
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ...
33
+ @classmethod
34
+ def set_getstate_hook(cls, hook_fn: Any) -> None: ...
35
+ @classmethod
36
+ def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ...
37
+ # Functional form of 'BatcherMapDataPipe'
38
+ def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> MapDataPipe:
39
+ r"""
40
+ Create mini-batches of data (functional name: ``batch``).
41
+
42
+ An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``,
43
+ or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``.
44
+
45
+ Args:
46
+ datapipe: Iterable DataPipe being batched
47
+ batch_size: The size of each batch
48
+ drop_last: Option to drop the last batch if it's not full
49
+
50
+ Example:
51
+ >>> # xdoctest: +SKIP
52
+ >>> from torchdata.datapipes.map import SequenceWrapper
53
+ >>> dp = SequenceWrapper(range(10))
54
+ >>> batch_dp = dp.batch(batch_size=2)
55
+ >>> list(batch_dp)
56
+ [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
57
+ """
58
+
59
+ # Functional form of 'ConcaterMapDataPipe'
60
+ def concat(self, *datapipes: MapDataPipe) -> MapDataPipe:
61
+ r"""
62
+ Concatenate multiple Map DataPipes (functional name: ``concat``).
63
+
64
+ The new index is the cumulative sum of the lengths of the source DataPipes.
65
+ For example, if there are 2 source DataPipes both with length 5,
66
+ index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to
67
+ elements of the first DataPipe, and 5 to 9 would refer to elements
68
+ of the second DataPipe.
69
+
70
+ Args:
71
+ datapipes: Map DataPipes being concatenated
72
+
73
+ Example:
74
+ >>> # xdoctest: +SKIP
75
+ >>> from torchdata.datapipes.map import SequenceWrapper
76
+ >>> dp1 = SequenceWrapper(range(3))
77
+ >>> dp2 = SequenceWrapper(range(3))
78
+ >>> concat_dp = dp1.concat(dp2)
79
+ >>> list(concat_dp)
80
+ [0, 1, 2, 0, 1, 2]
81
+ """
82
+
83
+ # Functional form of 'MapperMapDataPipe'
84
+ def map(self, fn: Callable= ...) -> MapDataPipe:
85
+ r"""
86
+ Apply the input function over each item from the source DataPipe (functional name: ``map``).
87
+
88
+ The function can be any regular Python function or partial object. Lambda
89
+ function is not recommended as it is not supported by pickle.
90
+
91
+ Args:
92
+ datapipe: Source MapDataPipe
93
+ fn: Function being applied to each item
94
+
95
+ Example:
96
+ >>> # xdoctest: +SKIP
97
+ >>> from torchdata.datapipes.map import SequenceWrapper, Mapper
98
+ >>> def add_one(x):
99
+ ... return x + 1
100
+ >>> dp = SequenceWrapper(range(10))
101
+ >>> map_dp_1 = dp.map(add_one)
102
+ >>> list(map_dp_1)
103
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
104
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
105
+ >>> list(map_dp_2)
106
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
107
+ """
108
+
109
+ # Functional form of 'ShufflerIterDataPipe'
110
+ def shuffle(self, *, indices: Optional[List] = None) -> IterDataPipe:
111
+ r"""
112
+ Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``).
113
+
114
+ When it is used with :class:`~torch.utils.data.DataLoader`, the methods to
115
+ set up random seed are different based on :attr:`num_workers`.
116
+
117
+ For single-process mode (:attr:`num_workers == 0`), the random seed is set before
118
+ the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
119
+ mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed
120
+ for each worker process.
121
+
122
+ Args:
123
+ datapipe: MapDataPipe being shuffled
124
+ indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing
125
+
126
+ Example:
127
+ >>> # xdoctest: +SKIP
128
+ >>> from torchdata.datapipes.map import SequenceWrapper
129
+ >>> dp = SequenceWrapper(range(10))
130
+ >>> shuffle_dp = dp.shuffle().set_seed(0)
131
+ >>> list(shuffle_dp)
132
+ [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
133
+ >>> list(shuffle_dp)
134
+ [6, 1, 9, 5, 2, 4, 7, 3, 8, 0]
135
+ >>> # Reset seed for Shuffler
136
+ >>> shuffle_dp = shuffle_dp.set_seed(0)
137
+ >>> list(shuffle_dp)
138
+ [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
139
+
140
+ Note:
141
+ Even thought this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it would return an
142
+ ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be non-sensitive to
143
+ the order of data order for the sake of random reads, but ``IterDataPipe`` depends on the order
144
+ of data during data-processing.
145
+ """
146
+
147
+ # Functional form of 'ZipperMapDataPipe'
148
+ def zip(self, *datapipes: MapDataPipe[T_co]) -> MapDataPipe:
149
+ r"""
150
+ Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
151
+
152
+ This MapDataPipe is exhausted as soon as the shortest input DataPipe is exhausted.
153
+
154
+ Args:
155
+ *datapipes: Map DataPipes being aggregated
156
+
157
+ Example:
158
+ >>> # xdoctest: +SKIP
159
+ >>> from torchdata.datapipes.map import SequenceWrapper
160
+ >>> dp1 = SequenceWrapper(range(3))
161
+ >>> dp2 = SequenceWrapper(range(10, 13))
162
+ >>> zip_dp = dp1.zip(dp2)
163
+ >>> list(zip_dp)
164
+ [(0, 10), (1, 11), (2, 12)]
165
+ """
166
+
167
+
168
+ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta):
169
+ functions: Dict[str, Callable] = ...
170
+ reduce_ex_hook: Optional[Callable] = ...
171
+ getstate_hook: Optional[Callable] = ...
172
+ str_hook: Optional[Callable] = ...
173
+ repr_hook: Optional[Callable] = ...
174
+ _number_of_samples_yielded: int = ...
175
+ _snapshot_state: _SnapshotState = _SnapshotState.Iterating
176
+ _fast_forward_iterator: Optional[Iterator] = ...
177
+ def __getattr__(self, attribute_name: Any): ...
178
+ @classmethod
179
+ def register_function(cls, function_name: Any, function: Any) -> None: ...
180
+ @classmethod
181
+ def register_datapipe_as_function(
182
+ cls,
183
+ function_name: Any,
184
+ cls_to_register: Any,
185
+ enable_df_api_tracing: bool = ...,
186
+ ): ...
187
+ def __getstate__(self): ...
188
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ...
189
+ @classmethod
190
+ def set_getstate_hook(cls, hook_fn: Any) -> None: ...
191
+ @classmethod
192
+ def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ...
193
+ # Functional form of 'BatcherIterDataPipe'
194
+ def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> IterDataPipe:
195
+ r"""
196
+ Creates mini-batches of data (functional name: ``batch``).
197
+
198
+ An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the
199
+ last batch if ``drop_last`` is set to ``False``.
200
+
201
+ Args:
202
+ datapipe: Iterable DataPipe being batched
203
+ batch_size: The size of each batch
204
+ drop_last: Option to drop the last batch if it's not full
205
+ wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
206
+ defaults to ``DataChunk``
207
+
208
+ Example:
209
+ >>> # xdoctest: +SKIP
210
+ >>> from torchdata.datapipes.iter import IterableWrapper
211
+ >>> dp = IterableWrapper(range(10))
212
+ >>> dp = dp.batch(batch_size=3, drop_last=True)
213
+ >>> list(dp)
214
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
215
+ """
216
+
217
+ # Functional form of 'CollatorIterDataPipe'
218
+ def collate(self, conversion: Optional[Union[Callable[..., Any],Dict[Union[str, Any], Union[Callable, Any]],]] = default_collate, collate_fn: Optional[Callable] = None) -> IterDataPipe:
219
+ r"""
220
+ Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
221
+
222
+ By default, it uses :func:`torch.utils.data.default_collate`.
223
+
224
+ .. note::
225
+ While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
226
+ default behavior and `functools.partial` to specify any additional arguments.
227
+
228
+ Args:
229
+ datapipe: Iterable DataPipe being collated
230
+ collate_fn: Customized collate function to collect and combine data or a batch of data.
231
+ Default function collates to Tensor(s) based on data type.
232
+
233
+ Example:
234
+ >>> # xdoctest: +SKIP
235
+ >>> # Convert integer data to float Tensor
236
+ >>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
237
+ ... def __init__(self, start, end):
238
+ ... super().__init__()
239
+ ... assert end > start, "this example code only works with end > start"
240
+ ... self.start = start
241
+ ... self.end = end
242
+ ...
243
+ ... def __iter__(self):
244
+ ... return iter(range(self.start, self.end))
245
+ ...
246
+ ... def __len__(self):
247
+ ... return self.end - self.start
248
+ ...
249
+ >>> ds = MyIterDataPipe(start=3, end=7)
250
+ >>> print(list(ds))
251
+ [3, 4, 5, 6]
252
+ >>> def collate_fn(batch):
253
+ ... return torch.tensor(batch, dtype=torch.float)
254
+ ...
255
+ >>> collated_ds = ds.collate(collate_fn=collate_fn)
256
+ >>> print(list(collated_ds))
257
+ [tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
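+ >>> # An illustrative sketch of the note above: bind extra arguments with
+ >>> # ``functools.partial`` (``collate_with_dtype`` is a hypothetical helper)
+ >>> import functools
+ >>> def collate_with_dtype(batch, dtype):
+ ...     return torch.tensor(batch, dtype=dtype)
+ >>> list(ds.collate(collate_fn=functools.partial(collate_with_dtype, dtype=torch.float64)))
+ [tensor(3., dtype=torch.float64), tensor(4., dtype=torch.float64), tensor(5., dtype=torch.float64), tensor(6., dtype=torch.float64)]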
258
+ """
259
+
260
+ # Functional form of 'ConcaterIterDataPipe'
261
+ def concat(self, *datapipes: IterDataPipe) -> IterDataPipe:
262
+ r"""
263
+ Concatenates multiple Iterable DataPipes (functional name: ``concat``).
264
+
265
+ The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones.
266
+
267
+ Args:
268
+ datapipes: Iterable DataPipes being concatenated
269
+
270
+ Example:
271
+ >>> # xdoctest: +REQUIRES(module:torchdata)
273
+ >>> from torchdata.datapipes.iter import IterableWrapper
274
+ >>> dp1 = IterableWrapper(range(3))
275
+ >>> dp2 = IterableWrapper(range(5))
276
+ >>> list(dp1.concat(dp2))
277
+ [0, 1, 2, 0, 1, 2, 3, 4]
278
+ """
279
+
280
+ # Functional form of 'DemultiplexerIterDataPipe'
281
+ def demux(self, num_instances: int, classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000) -> List[IterDataPipe]:
282
+ r"""
283
+ Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``).
284
+
285
+ A list of the child DataPipes is returned from this operation.
286
+
287
+ Args:
288
+ datapipe: Iterable DataPipe being filtered
289
+ num_instances: number of instances of the DataPipe to create
290
+ classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None``
291
+ drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None``
292
+ buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
293
+ DataPipes while waiting for their values to be yielded.
294
+ Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
295
+
296
+ Examples:
297
+ >>> # xdoctest: +REQUIRES(module:torchdata)
298
+ >>> from torchdata.datapipes.iter import IterableWrapper
299
+ >>> def odd_or_even(n):
300
+ ... return n % 2
301
+ >>> source_dp = IterableWrapper(range(5))
302
+ >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
303
+ >>> list(dp1)
304
+ [0, 2, 4]
305
+ >>> list(dp2)
306
+ [1, 3]
307
+ >>> # It can also filter out any element that gets `None` from the `classifier_fn`
308
+ >>> def odd_or_even_no_zero(n):
309
+ ... return n % 2 if n != 0 else None
310
+ >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
311
+ >>> list(dp1)
312
+ [2, 4]
313
+ >>> list(dp2)
314
+ [1, 3]
315
+ """
316
+
317
+ # Functional form of 'FilterIterDataPipe'
318
+ def filter(self, filter_fn: Callable, input_col=None) -> IterDataPipe:
319
+ r"""
320
+ Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
321
+
322
+ Args:
323
+ datapipe: Iterable DataPipe being filtered
324
+ filter_fn: Customized function mapping an element to a boolean.
325
+ input_col: Index or indices of data which ``filter_fn`` is applied, such as:
326
+
327
+ - ``None`` as default to apply ``filter_fn`` to the data directly.
328
+ - Integer(s) is used for list/tuple.
329
+ - Key(s) is used for dict.
330
+
331
+ Example:
332
+ >>> # xdoctest: +SKIP
333
+ >>> from torchdata.datapipes.iter import IterableWrapper
334
+ >>> def is_even(n):
335
+ ... return n % 2 == 0
336
+ >>> dp = IterableWrapper(range(5))
337
+ >>> filter_dp = dp.filter(filter_fn=is_even)
338
+ >>> list(filter_dp)
339
+ [0, 2, 4]
340
+ """
341
+
342
+ # Functional form of 'ForkerIterDataPipe'
343
+ def fork(self, num_instances: int, buffer_size: int = 1000, copy: Optional[Literal["shallow", "deep"]] = None) -> List[IterDataPipe]:
344
+ r"""
345
+ Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``).
346
+
347
+ Args:
348
+ datapipe: Iterable DataPipe being copied
349
+ num_instances: number of instances of the datapipe to create
350
+ buffer_size: this restricts how far ahead the leading child DataPipe
351
+ can read relative to the slowest child DataPipe.
352
+ Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
353
+ copy: copy strategy to use for items yielded by each branch. Supported
354
+ options are ``None`` for no copying, ``"shallow"`` for shallow object
355
+ copies, and ``"deep"`` for deep object copies. Defaults to ``None``.
356
+
357
+ Note:
358
+ All branches of the forked pipeline return the identical object unless
359
+ the copy parameter is supplied. If the object is mutable or contains
360
+ mutable objects, changing them in one branch will affect all others.
361
+
362
+ Example:
363
+ >>> # xdoctest: +REQUIRES(module:torchdata)
364
+ >>> from torchdata.datapipes.iter import IterableWrapper
365
+ >>> source_dp = IterableWrapper(range(5))
366
+ >>> dp1, dp2 = source_dp.fork(num_instances=2)
367
+ >>> list(dp1)
368
+ [0, 1, 2, 3, 4]
369
+ >>> list(dp2)
370
+ [0, 1, 2, 3, 4]
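+ >>> # An illustrative sketch of the note above: without ``copy``, both
+ >>> # branches yield the very same objects, so mutation leaks across branches
+ >>> mutable_dp = IterableWrapper([[0], [1], [2]])
+ >>> b1, b2 = mutable_dp.fork(num_instances=2)
+ >>> for item in b1:
+ ...     item.append(99)
+ >>> list(b2)
+ [[0, 99], [1, 99], [2, 99]]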
371
+ """
372
+
373
+ # Functional form of 'GrouperIterDataPipe'
374
+ def groupby(self, group_key_fn: Callable[[T_co], Any], *, keep_key: bool = False, buffer_size: int = 10000, group_size: Optional[int] = None, guaranteed_group_size: Optional[int] = None, drop_remaining: bool = False) -> IterDataPipe:
375
+ r"""
376
+ Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``.
377
+
378
+ (functional name: ``groupby``).
379
+
380
+ The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group
381
+ will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full,
382
+ the DataPipe will yield the largest batch with the same key, provided that its size is larger
383
+ than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``.
384
+
385
+ After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity
386
+ will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``.
387
+
388
+ Args:
389
+ datapipe: Iterable datapipe to be grouped
390
+ group_key_fn: Function used to generate group key from the data of the source datapipe
391
+ keep_key: Option to yield the matching key along with the items in a tuple,
392
+ resulting in `(key, [items])`; otherwise returning `[items]`
393
+ buffer_size: The size of buffer for ungrouped data
394
+ group_size: The max size of each group, a batch is yielded as soon as it reaches this size
395
+ guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full
396
+ drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer
397
+ when the buffer is full
398
+
399
+ Example:
400
+ >>> import os
401
+ >>> # xdoctest: +SKIP
402
+ >>> from torchdata.datapipes.iter import IterableWrapper
403
+ >>> def group_fn(file):
404
+ ... return os.path.basename(file).split(".")[0]
405
+ >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
406
+ >>> dp0 = source_dp.groupby(group_key_fn=group_fn)
407
+ >>> list(dp0)
408
+ [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']]
409
+ >>> # A group is yielded as soon as its size equals to `group_size`
410
+ >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2)
411
+ >>> list(dp1)
412
+ [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
413
+ >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size`
414
+ >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2)
415
+ >>> list(dp2)
416
+ [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
417
+ """
418
+
419
+ # Functional form of 'FileListerIterDataPipe'
420
+ def list_files(self, masks: Union[str, List[str]] = '', *, recursive: bool = False, abspath: bool = False, non_deterministic: bool = False, length: int = -1) -> IterDataPipe:
421
+ r"""
422
+ Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
423
+
424
+ Multiple root directories can be provided (functional name: ``list_files``).
425
+
426
+ Args:
427
+ root: Root directory or a sequence of root directories
428
+ masks: Unix style filter string or string list for filtering file name(s)
429
+ recursive: Whether to return pathname from nested directories or not
430
+ abspath: Whether to return relative pathname or absolute pathname
431
+ non_deterministic: Whether to return pathname in sorted order or not.
432
+ If ``False``, the results yielded from each root directory will be sorted
433
+ length: Nominal length of the datapipe
434
+
435
+ Example:
436
+ >>> # xdoctest: +SKIP
437
+ >>> from torchdata.datapipes.iter import FileLister
438
+ >>> dp = FileLister(root=".", recursive=True)
439
+ >>> list(dp)
440
+ ['example.py', './data/data.tar']
441
+ """
442
+
443
+ # Functional form of 'MapperIterDataPipe'
444
+ def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe:
445
+ r"""
446
+ Applies a function over each item from the source DataPipe (functional name: ``map``).
447
+
448
+ The function can be any regular Python function or partial object. Lambda
449
+ function is not recommended as it is not supported by pickle.
450
+
451
+ Args:
452
+ datapipe: Source Iterable DataPipe
453
+ fn: Function being applied over each item
454
+ input_col: Index or indices of data which ``fn`` is applied, such as:
455
+
456
+ - ``None`` as default to apply ``fn`` to the data directly.
457
+ - Integer(s) is used for list/tuple.
458
+ - Key(s) is used for dict.
459
+
460
+ output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified
461
+ only when ``input_col`` is not ``None``
462
+
463
+ - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with
464
+ multiple indices, the left-most one is used, and other indices will be removed.
465
+ - Integer is used for list/tuple. ``-1`` represents to append result at the end.
466
+ - Key is used for dict. New key is acceptable.
467
+
468
+ Example:
469
+ >>> # xdoctest: +SKIP
470
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
471
+ >>> def add_one(x):
472
+ ... return x + 1
473
+ >>> dp = IterableWrapper(range(10))
474
+ >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred
475
+ >>> list(map_dp_1)
476
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
477
+ >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle`
478
+ >>> # Use `functools.partial` or explicitly define the function instead
479
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
480
+ >>> list(map_dp_2)
481
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
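+ >>> # Illustrative: ``input_col`` selects column 1 of each tuple and the
+ >>> # result replaces it in place; ``output_col=-1`` appends it instead
+ >>> row_dp = IterableWrapper([(0, 10), (1, 11)])
+ >>> list(row_dp.map(add_one, input_col=1))
+ [(0, 11), (1, 12)]
+ >>> list(row_dp.map(add_one, input_col=1, output_col=-1))
+ [(0, 10, 11), (1, 11, 12)]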
482
+ """
483
+
484
+ # Functional form of 'MultiplexerIterDataPipe'
485
+ def mux(self, *datapipes) -> IterDataPipe:
486
+ r"""
487
+ Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``).
488
+
489
+ That is, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
490
+ and so on. It ends when the shortest input DataPipe is exhausted.
491
+
492
+ Args:
493
+ datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted
494
+
495
+ Example:
496
+ >>> # xdoctest: +REQUIRES(module:torchdata)
497
+ >>> from torchdata.datapipes.iter import IterableWrapper
498
+ >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
499
+ >>> list(dp1.mux(dp2, dp3))
500
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
501
+ """
502
+
503
+ # Functional form of 'FileOpenerIterDataPipe'
504
+ def open_files(self, mode: str = 'r', encoding: Optional[str] = None, length: int = -1) -> IterDataPipe:
505
+ r"""
506
+ Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``).
507
+
508
+ Args:
509
+ datapipe: Iterable datapipe that provides pathnames
510
+ mode: An optional string that specifies the mode in which
511
+ the file is opened by ``open()``. It defaults to ``r``, other options are
512
+ ``b`` for reading in binary mode and ``t`` for text mode.
513
+ encoding: An optional string that specifies the encoding of the
514
+ underlying file. It defaults to ``None`` to match the default encoding of ``open``.
515
+ length: Nominal length of the datapipe
516
+
517
+ Note:
518
+ The opened file handles will be closed by Python's GC periodically. Users can choose
519
+ to close them explicitly.
520
+
521
+ Example:
522
+ >>> # xdoctest: +SKIP
523
+ >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
524
+ >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
525
+ >>> dp = FileOpener(dp)
526
+ >>> dp = StreamReader(dp)
527
+ >>> list(dp)
528
+ [('./abc.txt', 'abc')]
529
+ """
530
+
531
+ # Functional form of 'StreamReaderIterDataPipe'
532
+ def read_from_stream(self, chunk=None) -> IterDataPipe:
533
+ r"""
534
+ Given IO streams and their label names, yields bytes with label name in a tuple.
535
+
536
+ (functional name: ``read_from_stream``).
537
+
538
+ Args:
539
+ datapipe: Iterable DataPipe provides label/URL and byte stream
540
+ chunk: Number of bytes to be read from stream per iteration.
541
+ If ``None``, all bytes will be read until the EOF.
542
+
543
+ Example:
544
+ >>> # xdoctest: +SKIP
545
+ >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader
546
+ >>> from io import StringIO
547
+ >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))])
548
+ >>> list(StreamReader(dp, chunk=1))
549
+ [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]
550
+ """
551
+
552
+ # Functional form of 'RoutedDecoderIterDataPipe'
553
+ def routed_decode(self, *handlers: Callable, key_fn: Callable= ...) -> IterDataPipe:
554
+ r"""
555
+ Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple.
556
+
557
+ (functional name: ``routed_decode``)
558
+
559
+ Args:
560
+ datapipe: Iterable datapipe that provides pathname and binary stream in tuples
561
+ handlers: Optional user-defined decoder handlers. If ``None``, basic and image decoder
562
+ handlers will be set as default. If multiple handlers are provided, the priority
563
+ order follows the order of handlers (the first handler has the top priority)
564
+ key_fn: Function for decoder to extract key from pathname to dispatch handlers.
565
+ Default is set to extract file extension from pathname
566
+
567
+ Note:
568
+ When ``key_fn`` is specified to return anything other than a file extension, the default
569
+ handlers will not work and users need to specify a custom handler. A custom handler
570
+ could use a regex to determine whether it is eligible to handle the data.
571
+ """
572
+
573
+ # Functional form of 'ShardingFilterIterDataPipe'
574
+ def sharding_filter(self, sharding_group_filter=None) -> IterDataPipe:
575
+ r"""
576
+ Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).
577
+
578
+ After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the
579
+ original DataPipe, where `n` equals the number of instances.
580
+
581
+ Args:
582
+ source_datapipe: Iterable DataPipe that will be sharded
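+
+ Example:
+ >>> # xdoctest: +SKIP
+ >>> # An illustrative sketch, assuming ``apply_sharding(num_of_instances, instance_id)``
+ >>> # is called on the returned pipe (the DataLoader normally applies this across workers)
+ >>> from torchdata.datapipes.iter import IterableWrapper
+ >>> dp = IterableWrapper(range(10)).sharding_filter()
+ >>> dp.apply_sharding(2, 0)  # two instances in total; this one has id 0
+ >>> list(dp)
+ [0, 2, 4, 6, 8]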
583
+ """
584
+
585
+ # Functional form of 'ShufflerIterDataPipe'
586
+ def shuffle(self, *, buffer_size: int = 10000, unbatch_level: int = 0) -> IterDataPipe:
587
+ r"""
588
+ Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).
589
+
590
+ The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
591
+ each item will be yielded from the buffer by reservoir sampling via iterator.
592
+
593
+ ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
594
+ datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
595
+ ``buffer_size`` is required to be greater than or equal to the size of datapipe.
596
+
597
+ When it is used with :class:`torch.utils.data.DataLoader`, the methods to
598
+ set up random seed are different based on :attr:`num_workers`.
599
+
600
+ For single-process mode (:attr:`num_workers == 0`), the random seed is set before
601
+ the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
602
+ mode (:attr:`num_workers > 0`), `worker_init_fn` is used to set up a random seed
603
+ for each worker process.
604
+
605
+ Args:
606
+ datapipe: The IterDataPipe being shuffled
607
+ buffer_size: The buffer size for shuffling (default to ``10000``)
608
+ unbatch_level: Specifies if it is necessary to unbatch source data before
609
+ applying the shuffle
610
+
611
+ Example:
612
+ >>> # xdoctest: +SKIP
613
+ >>> from torchdata.datapipes.iter import IterableWrapper
614
+ >>> dp = IterableWrapper(range(10))
615
+ >>> shuffle_dp = dp.shuffle()
616
+ >>> list(shuffle_dp)
617
+ [0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
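+ >>> # As noted above, a buffer of a single element performs no shuffling:
+ >>> list(dp.shuffle(buffer_size=1))
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]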
618
+ """
619
+
620
+ # Functional form of 'UnBatcherIterDataPipe'
621
+ def unbatch(self, unbatch_level: int = 1) -> IterDataPipe:
622
+ r"""
623
+ Undoes batching of data (functional name: ``unbatch``).
624
+
625
+ In other words, it flattens the data up to the specified level within a batched DataPipe.
626
+
627
+ Args:
628
+ datapipe: Iterable DataPipe being un-batched
629
+ unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
630
+ it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.
631
+
632
+ Example:
633
+ >>> # xdoctest: +SKIP
634
+ >>> from torchdata.datapipes.iter import IterableWrapper
635
+ >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
636
+ >>> dp1 = source_dp.unbatch()
637
+ >>> list(dp1)
638
+ [[0, 1], [2], [3, 4], [5], [6]]
639
+ >>> dp2 = source_dp.unbatch(unbatch_level=2)
640
+ >>> list(dp2)
641
+ [0, 1, 2, 3, 4, 5, 6]
642
+ """
643
+
644
+ # Functional form of 'ZipperIterDataPipe'
645
+ def zip(self, *datapipes: IterDataPipe) -> IterDataPipe:
646
+ r"""
647
+ Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
648
+
649
+ The output is stopped as soon as the shortest input DataPipe is exhausted.
650
+
651
+ Args:
652
+ *datapipes: Iterable DataPipes being aggregated
653
+
654
+ Example:
655
+ >>> # xdoctest: +REQUIRES(module:torchdata)
656
+ >>> from torchdata.datapipes.iter import IterableWrapper
657
+ >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
658
+ >>> list(dp1.zip(dp2, dp3))
659
+ [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]
660
+ """
661
+
662
+
663
+ class DFIterDataPipe(IterDataPipe):
664
+ def _is_dfpipe(self): ...
665
+ def __iter__(self): ...
666
+
667
+ class _DataPipeSerializationWrapper:
668
+ def __init__(self, datapipe): ...
669
+ def __getstate__(self): ...
670
+ def __setstate__(self, state): ...
671
+ def __len__(self): ...
672
+
673
+ class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
674
+ def __iter__(self): ...
675
+
676
+ class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
677
+ def __getitem__(self, idx): ...
678
+
679
+ class DataChunk(list, Generic[T]):
680
+ def __init__(self, items):
681
+ super().__init__(items)
682
+ self.items = items
683
+ def as_str(self, indent: str = "") -> str:
684
+ res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
685
+ return res
686
+ def __iter__(self) -> Iterator[T]:
687
+ yield from super().__iter__()
688
+ def raw_iterator(self) -> T: # type: ignore[misc]
689
+ yield from self.items
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py ADDED
@@ -0,0 +1,246 @@
1
+ import os
2
+ import pathlib
3
+ from collections import defaultdict
4
+ from typing import Any, Dict, List, Set, Tuple, Union
5
+
6
+
7
+ def materialize_lines(lines: List[str], indentation: int) -> str:
8
+ output = ""
9
+ new_line_with_indent = "\n" + " " * indentation
10
+ for i, line in enumerate(lines):
11
+ if i != 0:
12
+ output += new_line_with_indent
13
+ output += line.replace('\n', new_line_with_indent)
14
+ return output
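+ # Illustrative check: embedded newlines are re-indented along with the
+ # breaks between entries.
+ # >>> materialize_lines(["a", "b\nc"], indentation=4)
+ # 'a\n    b\n    c'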
15
+
16
+
17
+ def gen_from_template(dir: str, template_name: str, output_name: str, replacements: List[Tuple[str, Any, int]]):
18
+
19
+ template_path = os.path.join(dir, template_name)
20
+ output_path = os.path.join(dir, output_name)
21
+
22
+ with open(template_path) as f:
23
+ content = f.read()
24
+ # Apply every replacement first, then write the materialized content out once.
+ for placeholder, lines, indentation in replacements:
25
+ content = content.replace(placeholder, materialize_lines(lines, indentation))
26
+ with open(output_path, "w") as f:
27
+ f.write(content)
28
+
29
+
30
+ def find_file_paths(dir_paths: List[str], files_to_exclude: Set[str]) -> Set[str]:
31
+ """
32
+ When given a path to a directory, returns the paths to the relevant files within it.
33
+
34
+ This function does NOT recursive traverse to subdirectories.
35
+ """
36
+ paths: Set[str] = set()
37
+ for dir_path in dir_paths:
38
+ all_files = os.listdir(dir_path)
39
+ python_files = {fname for fname in all_files if fname.endswith(".py")}
40
+ filter_files = {fname for fname in python_files if fname not in files_to_exclude}
41
+ paths.update({os.path.join(dir_path, fname) for fname in filter_files})
42
+ return paths
43
+
44
+
45
+ def extract_method_name(line: str) -> str:
46
+ """Extract method name from decorator in the form of "@functional_datapipe({method_name})"."""
47
+ if "(\"" in line:
48
+ start_token, end_token = "(\"", "\")"
49
+ elif "(\'" in line:
50
+ start_token, end_token = "(\'", "\')"
51
+ else:
52
+ raise RuntimeError(f"Unable to find appropriate method name within line:\n{line}")
53
+ start, end = line.find(start_token) + len(start_token), line.find(end_token)
54
+ return line[start:end]
55
+
56
+
57
+ def extract_class_name(line: str) -> str:
58
+ """Extract class name from class definition in the form of "class {CLASS_NAME}({Type}):"."""
59
+ start_token = "class "
60
+ end_token = "("
61
+ start, end = line.find(start_token) + len(start_token), line.find(end_token)
62
+ return line[start:end]
63
+
64
+
65
+ def parse_datapipe_file(file_path: str) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]:
66
+ """Given a path to file, parses the file and returns a dictionary of method names to function signatures."""
67
+ method_to_signature, method_to_class_name, special_output_type = {}, {}, set()
68
+ doc_string_dict = defaultdict(list)
69
+ with open(file_path) as f:
70
+ open_paren_count = 0
71
+ method_name, class_name, signature = "", "", ""
72
+ skip = False
73
+ for line in f:
74
+ if line.count("\"\"\"") % 2 == 1:
75
+ skip = not skip
76
+ if skip or "\"\"\"" in line: # Saving docstrings
77
+ doc_string_dict[method_name].append(line)
78
+ continue
79
+ if "@functional_datapipe" in line:
80
+ method_name = extract_method_name(line)
81
+ doc_string_dict[method_name] = []
82
+ continue
83
+ if method_name and "class " in line:
84
+ class_name = extract_class_name(line)
85
+ continue
86
+ if method_name and ("def __init__(" in line or "def __new__(" in line):
87
+ if "def __new__(" in line:
88
+ special_output_type.add(method_name)
89
+ open_paren_count += 1
90
+ start = line.find("(") + len("(")
91
+ line = line[start:]
92
+ if open_paren_count > 0:
93
+ open_paren_count += line.count('(')
94
+ open_paren_count -= line.count(')')
95
+ if open_paren_count == 0:
96
+ end = line.rfind(')')
97
+ signature += line[:end]
98
+ method_to_signature[method_name] = process_signature(signature)
99
+ method_to_class_name[method_name] = class_name
100
+ method_name, class_name, signature = "", "", ""
101
+ elif open_paren_count < 0:
102
+ raise RuntimeError("open parenthesis count < 0. This shouldn't be possible.")
103
+ else:
104
+ signature += line.strip('\n').strip(' ')
105
+ return method_to_signature, method_to_class_name, special_output_type, doc_string_dict
106
+
107
+
108
+ def parse_datapipe_files(file_paths: Set[str]) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]:
109
+ methods_and_signatures, methods_and_class_names, methods_with_special_output_types = {}, {}, set()
110
+ methods_and_doc_strings = {}
111
+ for path in file_paths:
112
+ (
113
+ method_to_signature,
114
+ method_to_class_name,
115
+ methods_needing_special_output_types,
116
+ doc_string_dict,
117
+ ) = parse_datapipe_file(path)
118
+ methods_and_signatures.update(method_to_signature)
119
+ methods_and_class_names.update(method_to_class_name)
120
+ methods_with_special_output_types.update(methods_needing_special_output_types)
121
+ methods_and_doc_strings.update(doc_string_dict)
122
+ return methods_and_signatures, methods_and_class_names, methods_with_special_output_types, methods_and_doc_strings
123
+
124
+
125
+ def split_outside_bracket(line: str, delimiter: str = ",") -> List[str]:
126
+ """Given a line of text, split it on comma unless the comma is within a bracket '[]'."""
127
+ bracket_count = 0
128
+ curr_token = ""
129
+ res = []
130
+ for char in line:
131
+ if char == "[":
132
+ bracket_count += 1
133
+ elif char == "]":
134
+ bracket_count -= 1
135
+ elif char == delimiter and bracket_count == 0:
136
+ res.append(curr_token)
137
+ curr_token = ""
138
+ continue
139
+ curr_token += char
140
+ res.append(curr_token)
141
+ return res
142
+
143
+
144
+ def process_signature(line: str) -> str:
145
+ """
146
+ Clean up a given raw function signature.
147
+
148
+ This includes removing the self-referential datapipe argument, default
149
+ arguments of input functions, newlines, and spaces.
150
+ """
151
+ tokens: List[str] = split_outside_bracket(line)
152
+ for i, token in enumerate(tokens):
153
+ tokens[i] = token.strip(' ')
154
+ if token == "cls":
155
+ tokens[i] = "self"
156
+ elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"):
157
+ # Remove the datapipe after 'self' or 'cls' unless it has '*'
158
+ tokens[i] = ""
159
+ elif "Callable =" in token: # Remove default argument if it is a function
160
+ head, default_arg = token.rsplit("=", 1)
161
+ tokens[i] = head.strip(' ') + "= ..."
162
+ tokens = [t for t in tokens if t != ""]
163
+ line = ', '.join(tokens)
164
+ return line
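+ # Illustrative check: the datapipe argument right after 'self' is dropped.
+ # >>> process_signature("self, datapipe, batch_size: int, drop_last: bool = False")
+ # 'self, batch_size: int, drop_last: bool = False'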
165
+
166
+
167
+ def get_method_definitions(file_path: Union[str, List[str]],
168
+ files_to_exclude: Set[str],
169
+ deprecated_files: Set[str],
170
+ default_output_type: str,
171
+ method_to_special_output_type: Dict[str, str],
172
+ root: str = "") -> List[str]:
173
+ """
174
+ #.pyi generation for functional DataPipes Process.
175
+
176
+ # 1. Find files that we want to process (exclude the ones who don't)
177
+ # 2. Parse method name and signature
178
+ # 3. Remove first argument after self (unless it is "*datapipes"), default args, and spaces
179
+ """
180
+ if root == "":
181
+ root = str(pathlib.Path(__file__).parent.resolve())
182
+ file_path = [file_path] if isinstance(file_path, str) else file_path
183
+ file_path = [os.path.join(root, path) for path in file_path]
184
+ file_paths = find_file_paths(file_path,
185
+ files_to_exclude=files_to_exclude.union(deprecated_files))
186
+ methods_and_signatures, methods_and_class_names, methods_w_special_output_types, methods_and_doc_strings = \
187
+ parse_datapipe_files(file_paths)
188
+
189
+ for fn_name in method_to_special_output_type:
190
+ if fn_name not in methods_w_special_output_types:
191
+ methods_w_special_output_types.add(fn_name)
192
+
193
+ method_definitions = []
194
+ for method_name, arguments in methods_and_signatures.items():
195
+ class_name = methods_and_class_names[method_name]
196
+ if method_name in methods_w_special_output_types:
197
+ output_type = method_to_special_output_type[method_name]
198
+ else:
199
+ output_type = default_output_type
200
+ doc_string = "".join(methods_and_doc_strings[method_name])
201
+ if doc_string == "":
202
+ doc_string = " ...\n"
203
+ method_definitions.append(f"# Functional form of '{class_name}'\n"
204
+ f"def {method_name}({arguments}) -> {output_type}:\n"
205
+ f"{doc_string}")
206
+ method_definitions.sort(key=lambda s: s.split('\n')[1]) # sorting based on method_name
207
+
208
+ return method_definitions
209
+
210
+
211
+ # Defined outside of main() so they can be imported by TorchData
212
+ iterDP_file_path: str = "iter"
213
+ iterDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
214
+ iterDP_deprecated_files: Set[str] = set()
215
+ iterDP_method_to_special_output_type: Dict[str, str] = {"demux": "List[IterDataPipe]", "fork": "List[IterDataPipe]"}
216
+
217
+ mapDP_file_path: str = "map"
218
+ mapDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
219
+ mapDP_deprecated_files: Set[str] = set()
220
+ mapDP_method_to_special_output_type: Dict[str, str] = {"shuffle": "IterDataPipe"}
221
+
222
+
223
+ def main() -> None:
224
+ """
225
+ # Inject file into template datapipe.pyi.in.
226
+
227
+ TODO: The current implementation of this script only generates interfaces for built-in methods. To generate
228
+ interface for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`.
229
+ """
230
+ iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files,
231
+ "IterDataPipe", iterDP_method_to_special_output_type)
232
+
233
+ map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files,
234
+ "MapDataPipe", mapDP_method_to_special_output_type)
235
+
236
+ path = pathlib.Path(__file__).parent.resolve()
237
+ replacements = [('${IterDataPipeMethods}', iter_method_definitions, 4),
238
+ ('${MapDataPipeMethods}', map_method_definitions, 4)]
239
+ gen_from_template(dir=str(path),
240
+ template_name="datapipe.pyi.in",
241
+ output_name="datapipe.pyi",
242
+ replacements=replacements)
243
+
244
+
245
+ if __name__ == '__main__':
246
+ main()
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py ADDED
@@ -0,0 +1,237 @@
1
+ import functools
2
+ from collections import namedtuple
3
+
4
+ from typing import Callable, Iterator, Sized, TypeVar, Optional, Union, Any, Dict, List
5
+
6
+ from torch.utils.data.datapipes._decorator import functional_datapipe
7
+ from torch.utils.data._utils.collate import default_collate
8
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
9
+ from torch.utils.data.datapipes.datapipe import IterDataPipe
10
+ from torch.utils.data.datapipes.utils.common import (_check_unpickable_fn,
11
+ validate_input_col)
12
+
13
+ __all__ = [
14
+ "CollatorIterDataPipe",
15
+ "MapperIterDataPipe",
16
+ ]
17
+
18
+ T_co = TypeVar("T_co", covariant=True)
19
+
20
+
21
+ @functional_datapipe("map")
22
+ class MapperIterDataPipe(IterDataPipe[T_co]):
23
+ r"""
24
+ Applies a function over each item from the source DataPipe (functional name: ``map``).
25
+
26
+ The function can be any regular Python function or partial object. Lambda
27
+ function is not recommended as it is not supported by pickle.
28
+
29
+ Args:
30
+ datapipe: Source Iterable DataPipe
31
+ fn: Function being applied over each item
32
+ input_col: Index or indices of data which ``fn`` is applied, such as:
33
+
34
+ - ``None`` as default to apply ``fn`` to the data directly.
35
+ - Integer(s) is used for list/tuple.
36
+ - Key(s) is used for dict.
37
+
38
+ output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified
39
+ only when ``input_col`` is not ``None``
40
+
41
+ - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with
42
+ multiple indices, the left-most one is used, and other indices will be removed.
43
+ - Integer is used for list/tuple. ``-1`` represents to append result at the end.
44
+ - Key is used for dict. New key is acceptable.
45
+
46
+ Example:
47
+ >>> # xdoctest: +SKIP
48
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
49
+ >>> def add_one(x):
50
+ ... return x + 1
51
+ >>> dp = IterableWrapper(range(10))
52
+ >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred
53
+ >>> list(map_dp_1)
54
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
55
+ >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle`
56
+ >>> # Use `functools.partial` or explicitly define the function instead
57
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
58
+ >>> list(map_dp_2)
59
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
60
+ """
61
+
62
+ datapipe: IterDataPipe
63
+ fn: Callable
64
+
65
+ def __init__(
66
+ self,
67
+ datapipe: IterDataPipe,
68
+ fn: Callable,
69
+ input_col=None,
70
+ output_col=None,
71
+ ) -> None:
72
+ super().__init__()
73
+ self.datapipe = datapipe
74
+
75
+ _check_unpickable_fn(fn)
76
+ self.fn = fn # type: ignore[assignment]
77
+
78
+ self.input_col = input_col
79
+ if input_col is None and output_col is not None:
80
+ raise ValueError("`output_col` must be None when `input_col` is None.")
81
+ if isinstance(output_col, (list, tuple)):
82
+ if len(output_col) > 1:
83
+ raise ValueError("`output_col` must be a single-element list or tuple")
84
+ output_col = output_col[0]
85
+ self.output_col = output_col
86
+ validate_input_col(fn, input_col)
87
+
88
+ def _apply_fn(self, data):
89
+ if self.input_col is None and self.output_col is None:
90
+ return self.fn(data)
91
+
92
+ if self.input_col is None:
93
+ res = self.fn(data)
94
+ elif isinstance(self.input_col, (list, tuple)):
95
+ args = tuple(data[col] for col in self.input_col)
96
+ res = self.fn(*args)
97
+ else:
98
+ res = self.fn(data[self.input_col])
99
+
100
+ # Copy tuple to list and run in-place modification because tuple is immutable.
101
+ if isinstance(data, tuple):
102
+ t_flag = True
103
+ data = list(data)
104
+ else:
105
+ t_flag = False
106
+
107
+ if self.output_col is None:
108
+ if isinstance(self.input_col, (list, tuple)):
109
+ data[self.input_col[0]] = res
110
+ for idx in sorted(self.input_col[1:], reverse=True):
111
+ del data[idx]
112
+ else:
113
+ data[self.input_col] = res
114
+ else:
115
+ if self.output_col == -1:
116
+ data.append(res)
117
+ else:
118
+ data[self.output_col] = res
119
+
120
+ # Convert list back to tuple
121
+ return tuple(data) if t_flag else data
122
+
123
+ def __iter__(self) -> Iterator[T_co]:
124
+ for data in self.datapipe:
125
+ yield self._apply_fn(data)
126
+
127
+ def __len__(self) -> int:
128
+ if isinstance(self.datapipe, Sized):
129
+ return len(self.datapipe)
130
+ raise TypeError(
131
+ f"{type(self).__name__} instance doesn't have valid length"
132
+ )
133
+
134
+
135
+ def _collate_helper(conversion, item):
136
+ # TODO(VitalyFedyunin): Verify that item is any sort of batch
137
+ if len(item.items) > 1:
138
+ # TODO(VitalyFedyunin): Compact all batch dataframes into one
139
+ raise Exception("Only supports one DataFrame per batch")
140
+ df = item[0]
141
+ columns_name = df_wrapper.get_columns(df)
142
+ tuple_names: List = []
143
+ tuple_values: List = []
144
+
145
+ for name in conversion.keys():
146
+ if name not in columns_name:
147
+ raise Exception("Conversion keys missmatch")
148
+
149
+ for name in columns_name:
150
+ if name in conversion:
151
+ if not callable(conversion[name]):
152
+ raise Exception('Collate (DF)DataPipe requires callable as dict values')
153
+ collation_fn = conversion[name]
154
+ else:
155
+ # TODO(VitalyFedyunin): Add default collation into df_wrapper
156
+ try:
157
+ import torcharrow.pytorch as tap # type: ignore[import]
158
+ collation_fn = tap.rec.Default()
159
+ except Exception as e:
160
+ raise Exception("unable to import default collation function from the TorchArrow") from e
161
+
162
+ tuple_names.append(str(name))
163
+ value = collation_fn(df[name])
164
+ tuple_values.append(value)
165
+
166
+ # TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here
167
+ # TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is not empty
168
+ tpl_cls = namedtuple("CollateResult", tuple_names) # type: ignore[misc]
169
+ tuple = tpl_cls(*tuple_values)
170
+ return tuple
171
+
172
+
173
+ @functional_datapipe("collate")
174
+ class CollatorIterDataPipe(MapperIterDataPipe):
175
+ r"""
176
+ Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
177
+
178
+ By default, it uses :func:`torch.utils.data.default_collate`.
179
+
180
+ .. note::
181
+ While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
182
+ default behavior and `functools.partial` to specify any additional arguments.
183
+
184
+ Args:
185
+ datapipe: Iterable DataPipe being collated
186
+ collate_fn: Customized collate function to collect and combine data or a batch of data.
187
+ Default function collates to Tensor(s) based on data type.
188
+
189
+ Example:
190
+ >>> # xdoctest: +SKIP
191
+ >>> # Convert integer data to float Tensor
192
+ >>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
193
+ ... def __init__(self, start, end):
194
+ ... super(MyIterDataPipe).__init__()
195
+ ... assert end > start, "this example code only works with end >= start"
196
+ ... self.start = start
197
+ ... self.end = end
198
+ ...
199
+ ... def __iter__(self):
200
+ ... return iter(range(self.start, self.end))
201
+ ...
202
+ ... def __len__(self):
203
+ ... return self.end - self.start
204
+ ...
205
+ >>> ds = MyIterDataPipe(start=3, end=7)
206
+ >>> print(list(ds))
207
+ [3, 4, 5, 6]
208
+ >>> def collate_fn(batch):
209
+ ... return torch.tensor(batch, dtype=torch.float)
210
+ ...
211
+ >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn)
212
+ >>> print(list(collated_ds))
213
+ [tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
214
+ """
215
+
216
+ def __init__(
217
+ self,
218
+ datapipe: IterDataPipe,
219
+ conversion: Optional[
220
+ Union[
221
+ Callable[..., Any],
222
+ Dict[Union[str, Any], Union[Callable, Any]],
223
+ ]
224
+ ] = default_collate,
225
+ collate_fn: Optional[Callable] = None,
226
+ ) -> None:
227
+ # TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]`
228
+ # TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]`
229
+ if collate_fn is not None:
230
+ super().__init__(datapipe, fn=collate_fn)
231
+ else:
232
+ if callable(conversion):
233
+ super().__init__(datapipe, fn=conversion)
234
+ else:
235
+ # TODO(VitalyFedyunin): Validate passed dictionary
236
+ collate_fn = functools.partial(_collate_helper, conversion)
237
+ super().__init__(datapipe, fn=collate_fn)
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py ADDED
@@ -0,0 +1,71 @@
1
+ from io import IOBase
2
+ from typing import Iterable, Tuple, Optional
3
+
4
+ from torch.utils.data.datapipes._decorator import functional_datapipe
5
+ from torch.utils.data.datapipes.datapipe import IterDataPipe
6
+ from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames
7
+
8
+ __all__ = [
9
+ "FileOpenerIterDataPipe",
10
+ ]
11
+
12
+
13
+ @functional_datapipe("open_files")
14
+ class FileOpenerIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
15
+ r"""
16
+ Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``).
17
+
18
+ Args:
19
+ datapipe: Iterable datapipe that provides pathnames
20
+ mode: An optional string that specifies the mode in which
21
+ the file is opened by ``open()``. It defaults to ``r``, other options are
22
+ ``b`` for reading in binary mode and ``t`` for text mode.
23
+ encoding: An optional string that specifies the encoding of the
24
+ underlying file. It defaults to ``None`` to match the default encoding of ``open``.
25
+ length: Nominal length of the datapipe
26
+
27
+ Note:
28
+ The opened file handles will be closed by Python's GC periodically. Users can choose
29
+ to close them explicitly.
30
+
31
+ Example:
32
+ >>> # xdoctest: +SKIP
33
+ >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
34
+ >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
35
+ >>> dp = FileOpener(dp)
36
+ >>> dp = StreamReader(dp)
37
+ >>> list(dp)
38
+ [('./abc.txt', 'abc')]
39
+ """
40
+
41
+ def __init__(
42
+ self,
43
+ datapipe: Iterable[str],
44
+ mode: str = 'r',
45
+ encoding: Optional[str] = None,
46
+ length: int = -1):
47
+ super().__init__()
48
+ self.datapipe: Iterable = datapipe
49
+ self.mode: str = mode
50
+ self.encoding: Optional[str] = encoding
51
+
52
+ if self.mode not in ('b', 't', 'rb', 'rt', 'r'):
53
+ raise ValueError(f"Invalid mode {mode}")
54
+ # TODO: enforce typing for each instance based on mode, otherwise
55
+ # `argument_validation` with this DataPipe may be potentially broken
56
+
57
+ if 'b' in mode and encoding is not None:
58
+ raise ValueError("binary mode doesn't take an encoding argument")
59
+
60
+ self.length: int = length
61
+
62
+ # Remove annotation due to 'IOBase' is a general type and true type
63
+ # is determined at runtime based on mode. Some `DataPipe` requiring
64
+ # a subtype would cause mypy error.
65
+ def __iter__(self):
66
+ yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding)
67
+
68
+ def __len__(self):
69
+ if self.length == -1:
70
+ raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
71
+ return self.length
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py ADDED
@@ -0,0 +1,96 @@
1
+ from typing import Callable, Iterator, Tuple, TypeVar
2
+
3
+ from torch.utils.data.datapipes._decorator import functional_datapipe
4
+ from torch.utils.data.datapipes.datapipe import IterDataPipe
5
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
6
+ from torch.utils.data.datapipes.utils.common import (
7
+ _check_unpickable_fn,
8
+ StreamWrapper,
9
+ validate_input_col
10
+ )
11
+
12
+
13
+ __all__ = ["FilterIterDataPipe", ]
14
+
15
+ T = TypeVar('T')
16
+ T_co = TypeVar('T_co', covariant=True)
17
+
18
+
19
+ @functional_datapipe('filter')
20
+ class FilterIterDataPipe(IterDataPipe[T_co]):
21
+ r"""
22
+ Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
23
+
24
+ Args:
25
+ datapipe: Iterable DataPipe being filtered
26
+ filter_fn: Customized function mapping an element to a boolean.
27
+ input_col: Index or indices of data which ``filter_fn`` is applied, such as:
28
+
29
+ - ``None`` as default to apply ``filter_fn`` to the data directly.
30
+ - Integer(s) is used for list/tuple.
31
+ - Key(s) is used for dict.
32
+
33
+ Example:
34
+ >>> # xdoctest: +SKIP
35
+ >>> from torchdata.datapipes.iter import IterableWrapper
36
+ >>> def is_even(n):
37
+ ... return n % 2 == 0
38
+ >>> dp = IterableWrapper(range(5))
39
+ >>> filter_dp = dp.filter(filter_fn=is_even)
40
+ >>> list(filter_dp)
41
+ [0, 2, 4]
42
+ """
43
+
44
+ datapipe: IterDataPipe[T_co]
45
+ filter_fn: Callable
46
+
47
+ def __init__(
48
+ self,
49
+ datapipe: IterDataPipe[T_co],
50
+ filter_fn: Callable,
51
+ input_col=None,
52
+ ) -> None:
53
+ super().__init__()
54
+ self.datapipe = datapipe
55
+
56
+ _check_unpickable_fn(filter_fn)
57
+ self.filter_fn = filter_fn # type: ignore[assignment]
58
+
59
+ self.input_col = input_col
60
+ validate_input_col(filter_fn, input_col)
61
+
62
+ def _apply_filter_fn(self, data) -> bool:
63
+ if self.input_col is None:
64
+ return self.filter_fn(data)
65
+ elif isinstance(self.input_col, (list, tuple)):
66
+ args = tuple(data[col] for col in self.input_col)
67
+ return self.filter_fn(*args)
68
+ else:
69
+ return self.filter_fn(data[self.input_col])
70
+
71
+ def __iter__(self) -> Iterator[T_co]:
72
+ for data in self.datapipe:
73
+ condition, filtered = self._returnIfTrue(data)
74
+ if condition:
75
+ yield filtered
76
+ else:
77
+ StreamWrapper.close_streams(data)
78
+
79
+ def _returnIfTrue(self, data: T) -> Tuple[bool, T]:
80
+ condition = self._apply_filter_fn(data)
81
+
82
+ if df_wrapper.is_column(condition):
83
+ # We are operating on DataFrames filter here
84
+ result = []
85
+ for idx, mask in enumerate(df_wrapper.iterate(condition)):
86
+ if mask:
87
+ result.append(df_wrapper.get_item(data, idx))
88
+ if len(result):
89
+ return True, df_wrapper.concat(result)
90
+ else:
91
+ return False, None # type: ignore[return-value]
92
+
93
+ if not isinstance(condition, bool):
94
+ raise ValueError("Boolean output is required for `filter_fn` of FilterIterDataPipe, got", type(condition))
95
+
96
+ return condition, data
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py ADDED
@@ -0,0 +1,51 @@
1
+ import copy
2
+ import warnings
3
+ from torch.utils.data.datapipes.datapipe import IterDataPipe
4
+
5
+ __all__ = ["IterableWrapperIterDataPipe", ]
6
+
7
+
8
+ class IterableWrapperIterDataPipe(IterDataPipe):
9
+ r"""
10
+ Wraps an iterable object to create an IterDataPipe.
11
+
12
+ Args:
13
+ iterable: Iterable object to be wrapped into an IterDataPipe
14
+ deepcopy: Option to deepcopy input iterable object for each
15
+ iterator. The copy is made when the first element is read in ``iter()``.
16
+
17
+ .. note::
18
+ If ``deepcopy`` is explicitly set to ``False``, users should ensure
19
+ that the data pipeline doesn't contain any in-place operations over
20
+ the iterable instance to prevent data inconsistency across iterations.
21
+
22
+ Example:
23
+ >>> # xdoctest: +SKIP
24
+ >>> from torchdata.datapipes.iter import IterableWrapper
25
+ >>> dp = IterableWrapper(range(10))
26
+ >>> list(dp)
27
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
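+ >>> # With ``deepcopy=False`` (illustrative), in-place changes to the source
+ >>> # iterable remain visible to later iterations:
+ >>> data = [1, 2, 3]
+ >>> dp = IterableWrapper(data, deepcopy=False)
+ >>> data.append(4)
+ >>> list(dp)
+ [1, 2, 3, 4]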
28
+ """
29
+
30
+ def __init__(self, iterable, deepcopy=True):
31
+ self.iterable = iterable
32
+ self.deepcopy = deepcopy
33
+
34
+ def __iter__(self):
35
+ source_data = self.iterable
36
+ if self.deepcopy:
37
+ try:
38
+ source_data = copy.deepcopy(self.iterable)
39
+ # For the case that data cannot be deep-copied,
40
+ # all in-place operations will affect iterable variable.
41
+ # When this DataPipe is iterated second time, it will
42
+ # yield modified items.
43
+ except TypeError:
44
+ warnings.warn(
45
+ "The input iterable can not be deepcopied, "
46
+ "please be aware of in-place modification would affect source data."
47
+ )
48
+ yield from source_data
49
+
50
+ def __len__(self):
51
+ return len(self.iterable)
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py ADDED
@@ -0,0 +1,17 @@
1
+ # Functional DataPipe
2
+ from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper
3
+ from torch.utils.data.datapipes.map.combinatorics import ShufflerIterDataPipe as Shuffler
4
+ from torch.utils.data.datapipes.map.combining import (
5
+ ConcaterMapDataPipe as Concater,
6
+ ZipperMapDataPipe as Zipper
7
+ )
8
+ from torch.utils.data.datapipes.map.grouping import (
9
+ BatcherMapDataPipe as Batcher
10
+ )
11
+ from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe as SequenceWrapper
12
+
13
+
14
+ __all__ = ['Batcher', 'Concater', 'Mapper', 'SequenceWrapper', 'Shuffler', 'Zipper']
15
+
16
+ # Please keep this list sorted
17
+ assert __all__ == sorted(__all__)