Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
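For context, a commit like this is typically produced with the `huggingface_hub` large-folder upload helper. A minimal sketch is below; the repo id and local path are hypothetical placeholders, and the exact method name and keyword arguments may differ across `huggingface_hub` versions:

    from huggingface_hub import HfApi

    api = HfApi()
    # Resumable, multi-threaded upload intended for folders too large for a
    # single commit; large binaries such as the .pt checkpoints below are
    # stored via Git LFS. repo_id and folder_path are placeholders.
    api.upload_large_folder(
        repo_id="user/checkpoints",
        repo_type="model",
        folder_path="./local_folder",
    )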
- ckpts/universal/global_step120/zero/10.attention.dense.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torch/utils/bottleneck/__main__.py +229 -0
- venv/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__main__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__init__.py +51 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/fetch.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py +316 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py +54 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/pin_memory.py +98 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py +72 -0
- venv/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py +329 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_hook_iterator.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_typing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/gen_pyi.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py +64 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combinatorics.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combining.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/filelister.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/fileopener.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/grouping.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/routeddecoder.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/sharding.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/streamreader.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combinatorics.py +183 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py +639 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/filelister.py +66 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/grouping.py +300 -0
- venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py +66 -0
ckpts/universal/global_step120/zero/10.attention.dense.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6dfbf53ff7c578bb6b0b049731ae4f8846bd4a2cd10926ed84a6be8b53e1931e
+size 16778317
ckpts/universal/global_step120/zero/20.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd88d1290414c447426d4af36f9c119f3309ff614df0fce9c1831dc11d19fba6
+size 16778411
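Both checkpoint files above are stored as Git LFS pointers: the repository holds only a three-line text stub (spec version, SHA-256 of the payload, size in bytes) while the binary blob lives in LFS storage. A minimal sketch of reading those fields back, assuming the file on disk is still a pointer stub and has not been replaced by the real binary:

    def read_lfs_pointer(path):
        # Parse the "key value" lines of a Git LFS pointer stub.
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(' ')
                fields[key] = value
        return fields

    ptr = read_lfs_pointer('ckpts/universal/global_step120/zero/10.attention.dense.weight/fp32.pt')
    print(ptr['oid'], ptr['size'])  # sha256:6dfbf53f... 16778317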
venv/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/torch/utils/bottleneck/__main__.py
ADDED
@@ -0,0 +1,229 @@
+import argparse
+import cProfile
+import pstats
+import sys
+import os
+from typing import Dict
+
+import torch
+from torch.autograd import profiler
+from torch.utils.collect_env import get_env_info
+
+
+def redirect_argv(new_argv):
+    sys.argv[:] = new_argv[:]
+
+
+def compiled_with_cuda(sysinfo):
+    if sysinfo.cuda_compiled_version:
+        return f'compiled w/ CUDA {sysinfo.cuda_compiled_version}'
+    return 'not compiled w/ CUDA'
+
+
+env_summary = """
+--------------------------------------------------------------------------------
+  Environment Summary
+--------------------------------------------------------------------------------
+PyTorch {pytorch_version}{debug_str} {cuda_compiled}
+Running with Python {py_version} and {cuda_runtime}
+
+`{pip_version} list` truncated output:
+{pip_list_output}
+""".strip()
+
+
+def run_env_analysis():
+    print('Running environment analysis...')
+    info = get_env_info()
+
+    result: Dict[str, str] = {}
+
+    debug_str = ''
+    if info.is_debug_build:
+        debug_str = ' DEBUG'
+
+    cuda_avail = ''
+    if info.is_cuda_available:
+        cuda = info.cuda_runtime_version
+        if cuda is not None:
+            cuda_avail = 'CUDA ' + cuda
+    else:
+        cuda = 'CUDA unavailable'
+
+    pip_version = info.pip_version
+    pip_list_output = info.pip_packages
+    if pip_list_output is None:
+        pip_list_output = 'Unable to fetch'
+
+    result = {
+        'debug_str': debug_str,
+        'pytorch_version': info.torch_version,
+        'cuda_compiled': compiled_with_cuda(info),
+        'py_version': f'{sys.version_info[0]}.{sys.version_info[1]}',
+        'cuda_runtime': cuda_avail,
+        'pip_version': pip_version,
+        'pip_list_output': pip_list_output,
+    }
+
+    return env_summary.format(**result)
+
+
+def run_cprofile(code, globs, launch_blocking=False):
+    print('Running your script with cProfile')
+    prof = cProfile.Profile()
+    prof.enable()
+    exec(code, globs, None)
+    prof.disable()
+    return prof
+
+
+cprof_summary = """
+--------------------------------------------------------------------------------
+  cProfile output
+--------------------------------------------------------------------------------
+""".strip()
+
+
+def print_cprofile_summary(prof, sortby='tottime', topk=15):
+    print(cprof_summary)
+    cprofile_stats = pstats.Stats(prof).sort_stats(sortby)
+    cprofile_stats.print_stats(topk)
+
+
+def run_autograd_prof(code, globs):
+    def run_prof(use_cuda=False):
+        with profiler.profile(use_cuda=use_cuda) as prof:
+            exec(code, globs, None)
+        return prof
+
+    print('Running your script with the autograd profiler...')
+    result = [run_prof(use_cuda=False)]
+    if torch.cuda.is_available():
+        result.append(run_prof(use_cuda=True))
+    else:
+        result.append(None)
+
+    return result
+
+
+autograd_prof_summary = """
+--------------------------------------------------------------------------------
+  autograd profiler output ({mode} mode)
+--------------------------------------------------------------------------------
+        {description}
+{cuda_warning}
+{output}
+""".strip()
+
+
+def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
+    valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count']
+    if sortby not in valid_sortby:
+        warn = ('WARNING: invalid sorting option for autograd profiler results: {}\n'
+                'Expected `cpu_time`, `cpu_time_total`, or `count`. '
+                'Defaulting to `cpu_time`.')
+        print(warn.format(sortby))
+        sortby = 'cpu_time'
+
+    if mode == 'CUDA':
+        cuda_warning = ('\n\tBecause the autograd profiler uses the CUDA event API,\n'
+                        '\tthe CUDA time column reports approximately max(cuda_time, cpu_time).\n'
+                        '\tPlease ignore this output if your code does not use CUDA.\n')
+    else:
+        cuda_warning = ''
+
+    sorted_events = sorted(prof.function_events,
+                           key=lambda x: getattr(x, sortby), reverse=True)
+    topk_events = sorted_events[:topk]
+
+    result = {
+        'mode': mode,
+        'description': f'top {topk} events sorted by {sortby}',
+        'output': torch.autograd.profiler_util._build_table(topk_events),
+        'cuda_warning': cuda_warning
+    }
+
+    print(autograd_prof_summary.format(**result))
+
+
+descript = """
+`bottleneck` is a tool that can be used as an initial step for debugging
+bottlenecks in your program.
+
+It summarizes runs of your script with the Python profiler and PyTorch's
+autograd profiler. Because your script will be profiled, please ensure that it
+exits in a finite amount of time.
+
+For more complicated uses of the profilers, please see
+https://docs.python.org/3/library/profile.html and
+https://pytorch.org/docs/master/autograd.html#profiler for more information.
+""".strip()
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description=descript)
+    parser.add_argument('scriptfile', type=str,
+                        help='Path to the script to be run. '
+                             'Usually run with `python path/to/script`.')
+    parser.add_argument('args', type=str, nargs=argparse.REMAINDER,
+                        help='Command-line arguments to be passed to the script.')
+    return parser.parse_args()
+
+
+def cpu_time_total(autograd_prof):
+    return sum([event.cpu_time_total for event in autograd_prof.function_events])
+
+
+def main():
+    args = parse_args()
+
+    # Customizable constants.
+    scriptfile = args.scriptfile
+    scriptargs = [] if args.args is None else args.args
+    scriptargs.insert(0, scriptfile)
+    cprofile_sortby = 'tottime'
+    cprofile_topk = 15
+    autograd_prof_sortby = 'cpu_time_total'
+    autograd_prof_topk = 15
+
+    redirect_argv(scriptargs)
+
+    sys.path.insert(0, os.path.dirname(scriptfile))
+    with open(scriptfile, 'rb') as stream:
+        code = compile(stream.read(), scriptfile, 'exec')
+    globs = {
+        '__file__': scriptfile,
+        '__name__': '__main__',
+        '__package__': None,
+        '__cached__': None,
+    }
+
+    print(descript)
+
+    env_summary = run_env_analysis()
+
+    if torch.cuda.is_available():
+        torch.cuda.init()
+    cprofile_prof = run_cprofile(code, globs)
+    autograd_prof_cpu, autograd_prof_cuda = run_autograd_prof(code, globs)
+
+    print(env_summary)
+    print_cprofile_summary(cprofile_prof, cprofile_sortby, cprofile_topk)
+
+    if not torch.cuda.is_available():
+        print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
+        return
+
+    # Print both the result of the CPU-mode and CUDA-mode autograd profilers
+    # if their execution times are very different.
+    cuda_prof_exec_time = cpu_time_total(autograd_prof_cuda)
+    if len(autograd_prof_cpu.function_events) > 0:
+        cpu_prof_exec_time = cpu_time_total(autograd_prof_cpu)
+        pct_diff = (cuda_prof_exec_time - cpu_prof_exec_time) / cuda_prof_exec_time
+        if abs(pct_diff) > 0.05:
+            print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
+
+    print_autograd_prof_summary(autograd_prof_cuda, 'CUDA', autograd_prof_sortby, autograd_prof_topk)
+
+if __name__ == '__main__':
+    main()
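`__main__.py` makes the module runnable as a script: per the `descript` banner above, it executes the target script once under `cProfile` and once or twice under the autograd profiler, then prints the environment summary and the top-k events from each run. A minimal sketch of a script to profile this way (`example.py` is a hypothetical name; keep the workload short, since it is executed several times):

    # example.py -- run as: python -m torch.utils.bottleneck example.py
    import torch

    x = torch.randn(64, 1024)
    w = torch.randn(1024, 1024, requires_grad=True)
    for _ in range(100):
        loss = (x @ w).sum()
        loss.backward()  # exercises both forward and autograd paths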
venv/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (190 Bytes)
venv/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__main__.cpython-310.pyc
ADDED
Binary file (6.81 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.41 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc
ADDED
Binary file (476 Bytes)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc
ADDED
Binary file (28 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc
ADDED
Binary file (19.5 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc
ADDED
Binary file (5.06 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc
ADDED
Binary file (4.6 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc
ADDED
Binary file (4.85 kB)
venv/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc
ADDED
Binary file (11 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__init__.py
ADDED
@@ -0,0 +1,51 @@
+r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloader.py.
+
+A lot of multiprocessing is used in data loading, which only supports running
+functions defined in global environment (py2 can't serialize static methods).
+Therefore, for code tidiness we put these functions into different files in this
+folder.
+"""
+
+import sys
+import atexit
+
+# old private location of the ExceptionWrapper that some users rely on:
+from torch._utils import ExceptionWrapper
+
+
+IS_WINDOWS = sys.platform == "win32"
+
+
+MP_STATUS_CHECK_INTERVAL = 5.0
+r"""Interval (in seconds) to check status of processes to avoid hanging in
+multiprocessing data loading. This is mainly used in getting data from
+another process, in which case we need to periodically check whether the
+sender is alive to prevent hanging."""
+
+
+python_exit_status = False
+r"""Whether Python is shutting down. This flag is guaranteed to be set before
+the Python core library resources are freed, but Python may already be exiting
+for some time when this is set.
+
+Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
+hook in Python 3.7 multiprocessing library:
+https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
+"""
+
+
+try:
+    import numpy
+    HAS_NUMPY = True
+except ModuleNotFoundError:
+    HAS_NUMPY = False
+
+
+def _set_python_exit_flag():
+    global python_exit_status
+    python_exit_status = True
+
+atexit.register(_set_python_exit_flag)
+
+
+from . import worker, signal_handling, pin_memory, collate, fetch
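The `atexit` hook above exists so that worker and pin-memory loops can distinguish "the interpreter is shutting down" from a genuine failure. The same pattern in isolation, as a minimal sketch:

    import atexit

    _exiting = False

    def _mark_exit():
        global _exiting
        _exiting = True

    # Runs before interpreter teardown frees core resources, so long-lived
    # threads can consult the flag and suppress spurious errors during shutdown.
    atexit.register(_mark_exit)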
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.12 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc
ADDED
Binary file (13 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/fetch.cpython-310.pyc
ADDED
Binary file (2.28 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc
ADDED
Binary file (3.07 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc
ADDED
Binary file (2.65 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc
ADDED
Binary file (7.73 kB)
venv/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py
ADDED
@@ -0,0 +1,316 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
+
+These methods are used to collate samples fetched from dataset into Tensor(s).
+These **need** to be in global scope since Py2 doesn't support serializing
+static methods.
+
+`default_collate` and `default_convert` are exposed to users via 'dataloader.py'.
+"""
+
+import collections
+import contextlib
+import copy
+import re
+import torch
+
+from typing import Callable, Dict, Optional, Tuple, Type, Union
+
+np_str_obj_array_pattern = re.compile(r'[SaUO]')
+
+
+def default_convert(data):
+    r"""
+    Convert each NumPy array element into a :class:`torch.Tensor`.
+
+    If the input is a `Sequence`, `Collection`, or `Mapping`, it tries to convert each element inside to a :class:`torch.Tensor`.
+    If the input is not a NumPy array, it is left unchanged.
+    This is used as the default function for collation when both `batch_sampler` and `batch_size`
+    are NOT defined in :class:`~torch.utils.data.DataLoader`.
+
+    The general input type to output type mapping is similar to that
+    of :func:`~torch.utils.data.default_collate`. See the description there for more details.
+
+    Args:
+        data: a single data point to be converted
+
+    Examples:
+        >>> # xdoctest: +SKIP
+        >>> # Example with `int`
+        >>> default_convert(0)
+        0
+        >>> # Example with NumPy array
+        >>> default_convert(np.array([0, 1]))
+        tensor([0, 1])
+        >>> # Example with NamedTuple
+        >>> Point = namedtuple('Point', ['x', 'y'])
+        >>> default_convert(Point(0, 0))
+        Point(x=0, y=0)
+        >>> default_convert(Point(np.array(0), np.array(0)))
+        Point(x=tensor(0), y=tensor(0))
+        >>> # Example with List
+        >>> default_convert([np.array([0, 1]), np.array([2, 3])])
+        [tensor([0, 1]), tensor([2, 3])]
+    """
+    elem_type = type(data)
+    if isinstance(data, torch.Tensor):
+        return data
+    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
+            and elem_type.__name__ != 'string_':
+        # array of string classes and object
+        if elem_type.__name__ == 'ndarray' \
+                and np_str_obj_array_pattern.search(data.dtype.str) is not None:
+            return data
+        return torch.as_tensor(data)
+    elif isinstance(data, collections.abc.Mapping):
+        try:
+            if isinstance(data, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(data)
+                clone.update({key: default_convert(data[key]) for key in data})
+                return clone
+            else:
+                return elem_type({key: default_convert(data[key]) for key in data})
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {key: default_convert(data[key]) for key in data}
+    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
+        return elem_type(*(default_convert(d) for d in data))
+    elif isinstance(data, tuple):
+        return [default_convert(d) for d in data]  # Backwards compatibility.
+    elif isinstance(data, collections.abc.Sequence) and not isinstance(data, (str, bytes)):
+        try:
+            if isinstance(data, collections.abc.MutableSequence):
+                # The sequence type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new sequence.
+                # Create a clone and update it if the sequence type is mutable.
+                clone = copy.copy(data)  # type: ignore[arg-type]
+                for i, d in enumerate(data):
+                    clone[i] = default_convert(d)
+                return clone
+            else:
+                return elem_type([default_convert(d) for d in data])
+        except TypeError:
+            # The sequence type may not support `copy()` / `__setitem__(index, item)`
+            # or `__init__(iterable)` (e.g., `range`).
+            return [default_convert(d) for d in data]
+    else:
+        return data
+
+
+default_collate_err_msg_format = (
+    "default_collate: batch must contain tensors, numpy arrays, numbers, "
+    "dicts or lists; found {}")
+
+
+def collate(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    r"""
+    General collate function that handles collection type of element within each batch.
+
+    The function also opens function registry to deal with specific element types. `default_collate_fn_map`
+    provides default collate functions for tensors, numpy arrays, numbers and strings.
+
+    Args:
+        batch: a single batch to be collated
+        collate_fn_map: Optional dictionary mapping from element type to the corresponding collate function.
+            If the element type isn't present in this dictionary,
+            this function will go through each key of the dictionary in the insertion order to
+            invoke the corresponding collate function if the element type is a subclass of the key.
+
+    Examples:
+        >>> def collate_tensor_fn(batch, *, collate_fn_map):
+        ...     # Extend this function to handle batch of tensors
+        ...     return torch.stack(batch, 0)
+        >>> def custom_collate(batch):
+        ...     collate_map = {torch.Tensor: collate_tensor_fn}
+        ...     return collate(batch, collate_fn_map=collate_map)
+        >>> # Extend `default_collate` by in-place modifying `default_collate_fn_map`
+        >>> default_collate_fn_map.update({torch.Tensor: collate_tensor_fn})
+
+    Note:
+        Each collate function requires a positional argument for batch and a keyword argument
+        for the dictionary of collate functions as `collate_fn_map`.
+    """
+    elem = batch[0]
+    elem_type = type(elem)
+
+    if collate_fn_map is not None:
+        if elem_type in collate_fn_map:
+            return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
+
+        for collate_type in collate_fn_map:
+            if isinstance(elem, collate_type):
+                return collate_fn_map[collate_type](batch, collate_fn_map=collate_fn_map)
+
+    if isinstance(elem, collections.abc.Mapping):
+        try:
+            if isinstance(elem, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(elem)
+                clone.update({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
+                return clone
+            else:
+                return elem_type({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem}
+    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
+        return elem_type(*(collate(samples, collate_fn_map=collate_fn_map) for samples in zip(*batch)))
+    elif isinstance(elem, collections.abc.Sequence):
+        # check to make sure that the elements in batch have consistent size
+        it = iter(batch)
+        elem_size = len(next(it))
+        if not all(len(elem) == elem_size for elem in it):
+            raise RuntimeError('each element in list of batch should be of equal size')
+        transposed = list(zip(*batch))  # It may be accessed twice, so we use a list.
+
+        if isinstance(elem, tuple):
+            return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]  # Backwards compatibility.
+        else:
+            try:
+                if isinstance(elem, collections.abc.MutableSequence):
+                    # The sequence type may have extra properties, so we can't just
+                    # use `type(data)(...)` to create the new sequence.
+                    # Create a clone and update it if the sequence type is mutable.
+                    clone = copy.copy(elem)  # type: ignore[arg-type]
+                    for i, samples in enumerate(transposed):
+                        clone[i] = collate(samples, collate_fn_map=collate_fn_map)
+                    return clone
+                else:
+                    return elem_type([collate(samples, collate_fn_map=collate_fn_map) for samples in transposed])
+            except TypeError:
+                # The sequence type may not support `copy()` / `__setitem__(index, item)`
+                # or `__init__(iterable)` (e.g., `range`).
+                return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]
+
+    raise TypeError(default_collate_err_msg_format.format(elem_type))
+
+
+def collate_tensor_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    elem = batch[0]
+    out = None
+    if elem.is_nested:
+        raise RuntimeError(
+            "Batches of nested tensors are not currently supported by the default collate_fn; "
+            "please provide a custom collate_fn to handle them appropriately."
+        )
+    if elem.layout in {torch.sparse_coo, torch.sparse_csr, torch.sparse_bsr, torch.sparse_csc, torch.sparse_bsc}:
+        raise RuntimeError(
+            "Batches of sparse tensors are not currently supported by the default collate_fn; "
+            "please provide a custom collate_fn to handle them appropriately."
+        )
+    if torch.utils.data.get_worker_info() is not None:
+        # If we're in a background process, concatenate directly into a
+        # shared memory tensor to avoid an extra copy
+        numel = sum(x.numel() for x in batch)
+        storage = elem._typed_storage()._new_shared(numel, device=elem.device)
+        out = elem.new(storage).resize_(len(batch), *list(elem.size()))
+    return torch.stack(batch, 0, out=out)
+
+
+def collate_numpy_array_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    elem = batch[0]
+    # array of string classes and object
+    if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
+        raise TypeError(default_collate_err_msg_format.format(elem.dtype))
+
+    return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)
+
+
+def collate_numpy_scalar_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    return torch.as_tensor(batch)
+
+
+def collate_float_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    return torch.tensor(batch, dtype=torch.float64)
+
+
+def collate_int_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    return torch.tensor(batch)
+
+
+def collate_str_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    return batch
+
+
+default_collate_fn_map: Dict[Union[Type, Tuple[Type, ...]], Callable] = {torch.Tensor: collate_tensor_fn}
+with contextlib.suppress(ImportError):
+    import numpy as np
+    # For both ndarray and memmap (subclass of ndarray)
+    default_collate_fn_map[np.ndarray] = collate_numpy_array_fn
+    # See scalars hierarchy: https://numpy.org/doc/stable/reference/arrays.scalars.html
+    # Skip string scalars
+    default_collate_fn_map[(np.bool_, np.number, np.object_)] = collate_numpy_scalar_fn
+default_collate_fn_map[float] = collate_float_fn
+default_collate_fn_map[int] = collate_int_fn
+default_collate_fn_map[str] = collate_str_fn
+default_collate_fn_map[bytes] = collate_str_fn
+
+
+def default_collate(batch):
+    r"""
+    Take in a batch of data and put the elements within the batch into a tensor with an additional outer dimension - batch size.
+
+    The exact output type can be a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
+    Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
+    This is used as the default function for collation when
+    `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.
+
+    Here is the general input type (based on the type of the element within the batch) to output type mapping:
+
+        * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
+        * NumPy Arrays -> :class:`torch.Tensor`
+        * `float` -> :class:`torch.Tensor`
+        * `int` -> :class:`torch.Tensor`
+        * `str` -> `str` (unchanged)
+        * `bytes` -> `bytes` (unchanged)
+        * `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
+        * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[default_collate([V1_1, V1_2, ...]),
+          default_collate([V2_1, V2_2, ...]), ...]`
+        * `Sequence[V1_i, V2_i, ...]` -> `Sequence[default_collate([V1_1, V1_2, ...]),
+          default_collate([V2_1, V2_2, ...]), ...]`
+
+    Args:
+        batch: a single batch to be collated
+
+    Examples:
+        >>> # xdoctest: +SKIP
+        >>> # Example with a batch of `int`s:
+        >>> default_collate([0, 1, 2, 3])
+        tensor([0, 1, 2, 3])
+        >>> # Example with a batch of `str`s:
+        >>> default_collate(['a', 'b', 'c'])
+        ['a', 'b', 'c']
+        >>> # Example with `Map` inside the batch:
+        >>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
+        {'A': tensor([  0, 100]), 'B': tensor([  1, 100])}
+        >>> # Example with `NamedTuple` inside the batch:
+        >>> Point = namedtuple('Point', ['x', 'y'])
+        >>> default_collate([Point(0, 0), Point(1, 1)])
+        Point(x=tensor([0, 1]), y=tensor([0, 1]))
+        >>> # Example with `Tuple` inside the batch:
+        >>> default_collate([(0, 1), (2, 3)])
+        [tensor([0, 2]), tensor([1, 3])]
+        >>> # Example with `List` inside the batch:
+        >>> default_collate([[0, 1], [2, 3]])
+        [tensor([0, 2]), tensor([1, 3])]
+        >>> # Two options to extend `default_collate` to handle specific type
+        >>> # Option 1: Write custom collate function and invoke `default_collate`
+        >>> def custom_collate(batch):
+        ...     elem = batch[0]
+        ...     if isinstance(elem, CustomType):  # Some custom condition
+        ...         return ...
+        ...     else:  # Fall back to `default_collate`
+        ...         return default_collate(batch)
+        >>> # Option 2: In-place modify `default_collate_fn_map`
+        >>> def collate_customtype_fn(batch, *, collate_fn_map=None):
+        ...     return ...
+        >>> default_collate_fn_map.update({CustomType: collate_customtype_fn})
+        >>> default_collate(batch)  # Handle `CustomType` automatically
+    """
+    return collate(batch, collate_fn_map=default_collate_fn_map)
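As the docstring above describes, `default_collate` recurses through mappings, namedtuples, and sequences, stacking the tensor leaves along a new batch dimension. A small usage sketch (the function is also re-exported as `torch.utils.data.default_collate`):

    import torch
    from torch.utils.data import default_collate

    batch = [{'x': torch.ones(3), 'y': 1}, {'x': torch.zeros(3), 'y': 2}]
    out = default_collate(batch)
    print(out['x'].shape)  # torch.Size([2, 3]) -- tensors stacked on a new dim
    print(out['y'])        # tensor([1, 2])     -- ints collated into a tensor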
venv/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py
ADDED
@@ -0,0 +1,54 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter to fetch data from an iterable-style or map-style dataset.
+
+This logic is shared in both single- and multi-processing data loading.
+"""
+
+
+class _BaseDatasetFetcher:
+    def __init__(self, dataset, auto_collation, collate_fn, drop_last):
+        self.dataset = dataset
+        self.auto_collation = auto_collation
+        self.collate_fn = collate_fn
+        self.drop_last = drop_last
+
+    def fetch(self, possibly_batched_index):
+        raise NotImplementedError()
+
+
+class _IterableDatasetFetcher(_BaseDatasetFetcher):
+    def __init__(self, dataset, auto_collation, collate_fn, drop_last):
+        super().__init__(dataset, auto_collation, collate_fn, drop_last)
+        self.dataset_iter = iter(dataset)
+        self.ended = False
+
+    def fetch(self, possibly_batched_index):
+        if self.ended:
+            raise StopIteration
+
+        if self.auto_collation:
+            data = []
+            for _ in possibly_batched_index:
+                try:
+                    data.append(next(self.dataset_iter))
+                except StopIteration:
+                    self.ended = True
+                    break
+            if len(data) == 0 or (
+                self.drop_last and len(data) < len(possibly_batched_index)
+            ):
+                raise StopIteration
+        else:
+            data = next(self.dataset_iter)
+        return self.collate_fn(data)
+
+
+class _MapDatasetFetcher(_BaseDatasetFetcher):
+    def fetch(self, possibly_batched_index):
+        if self.auto_collation:
+            if hasattr(self.dataset, "__getitems__") and self.dataset.__getitems__:
+                data = self.dataset.__getitems__(possibly_batched_index)
+            else:
+                data = [self.dataset[idx] for idx in possibly_batched_index]
+        else:
+            data = self.dataset[possibly_batched_index]
+        return self.collate_fn(data)
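The fetchers above are internal, but their effect is visible through `DataLoader`: with `batch_size` set, auto-collation is enabled and `_MapDatasetFetcher` effectively returns `collate_fn([dataset[i] for i in indices])`. A minimal sketch:

    import torch
    from torch.utils.data import DataLoader, Dataset

    class Squares(Dataset):
        def __len__(self):
            return 8

        def __getitem__(self, idx):
            return torch.tensor(idx * idx)

    # batch_size enables auto-collation: the fetcher gathers dataset[i] for
    # each index in the batch and hands the list to the (default) collate_fn.
    loader = DataLoader(Squares(), batch_size=4)
    print(next(iter(loader)))  # tensor([0, 1, 4, 9])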
venv/lib/python3.10/site-packages/torch/utils/data/_utils/pin_memory.py
ADDED
@@ -0,0 +1,98 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
+
+These **need** to be in global scope since Py2 doesn't support serializing
+static methods.
+"""
+
+import collections
+import copy
+import queue
+
+import torch
+from . import MP_STATUS_CHECK_INTERVAL
+from torch._utils import ExceptionWrapper
+
+
+def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
+    # This setting is thread local, and prevents the copy in pin_memory from
+    # consuming all CPU cores.
+    torch.set_num_threads(1)
+
+    if device == "cuda":
+        torch.cuda.set_device(device_id)
+    elif device == "xpu":
+        torch.xpu.set_device(device_id)  # type: ignore[attr-defined]
+    elif device == torch._C._get_privateuse1_backend_name():
+        custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
+        custom_device_mod.set_device(device_id)
+
+    def do_one_step():
+        try:
+            r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
+        except queue.Empty:
+            return
+        idx, data = r
+        if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
+            try:
+                data = pin_memory(data, device)
+            except Exception:
+                data = ExceptionWrapper(
+                    where=f"in pin memory thread for device {device_id}")
+            r = (idx, data)
+        while not done_event.is_set():
+            try:
+                out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
+                break
+            except queue.Full:
+                continue
+
+    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
+    # logic of this function.
+    while not done_event.is_set():
+        # Make sure that we don't preserve any object from one iteration
+        # to the next
+        do_one_step()
+
+def pin_memory(data, device=None):
+    if isinstance(data, torch.Tensor):
+        return data.pin_memory(device)
+    elif isinstance(data, (str, bytes)):
+        return data
+    elif isinstance(data, collections.abc.Mapping):
+        try:
+            if isinstance(data, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(data)
+                clone.update({k: pin_memory(sample, device) for k, sample in data.items()})
+                return clone
+            else:
+                return type(data)({k: pin_memory(sample, device) for k, sample in data.items()})  # type: ignore[call-arg]
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {k: pin_memory(sample, device) for k, sample in data.items()}
+    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
+        return type(data)(*(pin_memory(sample, device) for sample in data))
+    elif isinstance(data, tuple):
+        return [pin_memory(sample, device) for sample in data]  # Backwards compatibility.
+    elif isinstance(data, collections.abc.Sequence):
+        try:
+            if isinstance(data, collections.abc.MutableSequence):
+                # The sequence type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new sequence.
+                # Create a clone and update it if the sequence type is mutable.
+                clone = copy.copy(data)  # type: ignore[arg-type]
+                for i, item in enumerate(data):
+                    clone[i] = pin_memory(item, device)
+                return clone
+            return type(data)([pin_memory(sample, device) for sample in data])  # type: ignore[call-arg]
+        except TypeError:
+            # The sequence type may not support `copy()` / `__setitem__(index, item)`
+            # or `__init__(iterable)` (e.g., `range`).
+            return [pin_memory(sample, device) for sample in data]
+    elif hasattr(data, "pin_memory"):
+        return data.pin_memory()
+    else:
+        return data
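`pin_memory` above is what `DataLoader(pin_memory=True)` applies to each fetched batch in a dedicated thread: tensors land in page-locked host memory, which is what allows asynchronous host-to-device copies afterwards. A minimal sketch:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    ds = TensorDataset(torch.randn(256, 16))
    loader = DataLoader(ds, batch_size=32, pin_memory=True)
    for (x,) in loader:
        if torch.cuda.is_available():
            # non_blocking=True only overlaps with compute when the source
            # tensor is pinned; otherwise the copy is synchronous anyway.
            x = x.to('cuda', non_blocking=True)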
venv/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py
ADDED
@@ -0,0 +1,72 @@
+r"""Signal handling for multiprocessing data loading.
+
+NOTE [ Signal handling in multiprocessing data loading ]
+
+In cases like DataLoader, if a worker process dies due to bus error/segfault
+or just hangs, the main process will hang waiting for data. This is difficult
+to avoid on PyTorch side as it can be caused by limited shm, or other
+libraries users call in the workers. In this file and `DataLoader.cpp`, we make
+our best effort to provide some error message to users when such unfortunate
+events happen.
+
+When a _BaseDataLoaderIter starts worker processes, their pids are registered in a
+map defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ]
+via `_set_worker_pids`.
+
+When an error happens in a worker process, the main process receives a SIGCHLD,
+and Python will eventually call the handler registered below
+(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails`
+call checks all registered worker pids and raises a proper error message to
+prevent the main process from hanging waiting for data from a worker.
+
+Additionally, at the beginning of each worker's `_utils.worker._worker_loop`,
+`_set_worker_signal_handlers` is called to register critical signal handlers
+(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error
+message to stderr before triggering the default handler. So a message will also
+be printed from the worker process when it is killed by such signals.
+
+See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of
+this signal handling design and other mechanisms we implement to make our
+multiprocessing data loading robust to errors.
+"""
+
+import signal
+import threading
+from . import IS_WINDOWS
+
+# Some of the following imported functions are not used in this file, but are to
+# be used via `_utils.signal_handling.XXXXX`.
+from torch._C import _set_worker_pids, _remove_worker_pids  # noqa: F401
+from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers  # noqa: F401
+
+_SIGCHLD_handler_set = False
+r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
+handler needs to be set for all DataLoaders in a process."""
+
+
+def _set_SIGCHLD_handler():
+    # Windows doesn't support SIGCHLD handler
+    if IS_WINDOWS:
+        return
+    # can't set signal in child threads
+    if not isinstance(threading.current_thread(), threading._MainThread):  # type: ignore[attr-defined]
+        return
+    global _SIGCHLD_handler_set
+    if _SIGCHLD_handler_set:
+        return
+    previous_handler = signal.getsignal(signal.SIGCHLD)
+    if not callable(previous_handler):
+        # This doesn't catch default handler, but SIGCHLD default handler is a
+        # no-op.
+        previous_handler = None
+
+    def handler(signum, frame):
+        # This following call uses `waitid` with WNOHANG from C side. Therefore,
+        # Python can still get and update the process status successfully.
+        _error_if_any_worker_fails()
+        if previous_handler is not None:
+            assert callable(previous_handler)
+            previous_handler(signum, frame)
+
+    signal.signal(signal.SIGCHLD, handler)
+    _SIGCHLD_handler_set = True
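The handler installed above chains to whatever SIGCHLD handler was previously registered, so the DataLoader check does not clobber user signal handling. The chaining pattern in isolation (POSIX only; SIGCHLD does not exist on Windows), as a minimal sketch:

    import signal

    previous = signal.getsignal(signal.SIGCHLD)

    def handler(signum, frame):
        # Run our own check first (in the file above, _error_if_any_worker_fails),
        # then fall through to the handler that was installed before ours.
        if callable(previous):
            previous(signum, frame)

    signal.signal(signal.SIGCHLD, handler)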
venv/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py
ADDED
@@ -0,0 +1,329 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
+
+These **need** to be in global scope since Py2 doesn't support serializing
+static methods.
+"""
+
+import torch
+import random
+import os
+import queue
+from dataclasses import dataclass
+from torch._utils import ExceptionWrapper
+from typing import Optional, Union, TYPE_CHECKING
+from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY
+if TYPE_CHECKING:
+    from torch.utils.data import Dataset
+
+if IS_WINDOWS:
+    import ctypes
+    from ctypes.wintypes import DWORD, BOOL, HANDLE
+
+    # On Windows, the parent ID of the worker process remains unchanged when the manager process
+    # is gone, and the only way to check it through OS is to let the worker have a process handle
+    # of the manager and ask if the process status has changed.
+    class ManagerWatchdog:
+        def __init__(self):
+            self.manager_pid = os.getppid()
+
+            # mypy cannot detect this code is windows only
+            self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)  # type: ignore[attr-defined]
+            self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
+            self.kernel32.OpenProcess.restype = HANDLE
+            self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
+            self.kernel32.WaitForSingleObject.restype = DWORD
+
+            # Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
+            SYNCHRONIZE = 0x00100000
+            self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
+
+            if not self.manager_handle:
+                raise ctypes.WinError(ctypes.get_last_error())  # type: ignore[attr-defined]
+
+            self.manager_dead = False
+
+        def is_alive(self):
+            if not self.manager_dead:
+                # Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
+                self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
+            return not self.manager_dead
+else:
+    class ManagerWatchdog:  # type: ignore[no-redef]
+        def __init__(self):
+            self.manager_pid = os.getppid()
+            self.manager_dead = False
+
+        def is_alive(self):
+            if not self.manager_dead:
+                self.manager_dead = os.getppid() != self.manager_pid
+            return not self.manager_dead
+
+_worker_info: Optional["WorkerInfo"] = None
+
+
+class WorkerInfo:
+    id: int
+    num_workers: int
+    seed: int
+    dataset: 'Dataset'
+    __initialized = False
+
+    def __init__(self, **kwargs):
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+        self.__keys = tuple(kwargs.keys())
+        self.__initialized = True
+
+    def __setattr__(self, key, val):
+        if self.__initialized:
+            raise RuntimeError(f"Cannot assign attributes to {self.__class__.__name__} objects")
+        return super().__setattr__(key, val)
+
+    def __repr__(self):
+        items = []
+        for k in self.__keys:
+            items.append(f'{k}={getattr(self, k)}')
+        return f"{self.__class__.__name__}({', '.join(items)})"
+
+
+def get_worker_info() -> Optional[WorkerInfo]:
+    r"""Returns the information about the current
+    :class:`~torch.utils.data.DataLoader` iterator worker process.
+
+    When called in a worker, this returns an object guaranteed to have the
+    following attributes:
+
+    * :attr:`id`: the current worker id.
+    * :attr:`num_workers`: the total number of workers.
+    * :attr:`seed`: the random seed set for the current worker. This value is
+      determined by main process RNG and the worker id. See
+      :class:`~torch.utils.data.DataLoader`'s documentation for more details.
+    * :attr:`dataset`: the copy of the dataset object in **this** process. Note
+      that this will be a different object in a different process than the one
+      in the main process.
+
+    When called in the main process, this returns ``None``.
+
+    .. note::
+       When used in a :attr:`worker_init_fn` passed over to
+       :class:`~torch.utils.data.DataLoader`, this method can be useful to
+       set up each worker process differently, for instance, using ``worker_id``
+       to configure the ``dataset`` object to only read a specific fraction of a
+       sharded dataset, or use ``seed`` to seed other libraries used in dataset
+       code.
+    """
+    return _worker_info
+
+
+r"""Dummy class used to signal the end of an IterableDataset"""
+@dataclass(frozen=True)
+class _IterableDatasetStopIteration:
+    worker_id: int
+
+r"""Dummy class used to resume the fetching when worker reuse is enabled"""
+@dataclass(frozen=True)
+class _ResumeIteration:
+    seed: Optional[int] = None
+
+# The function `_generate_state` is adapted from `numpy.random.SeedSequence`
+# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx
+# It's MIT licensed, here is the copyright:
+
+# Copyright (c) 2015 Melissa E. O'Neill
+# Copyright (c) 2019 NumPy Developers
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# This function generates an array of int32 as the seed for
+# `numpy.random`, in order to prevent state collision due to same
+# seed and algorithm for `numpy.random` and `random` modules.
+# TODO: Implement `SeedSequence` like object for `torch.random`
+def _generate_state(base_seed, worker_id):
+    INIT_A = 0x43b0d7e5
+    MULT_A = 0x931e8875
+    INIT_B = 0x8b51f9dd
+    MULT_B = 0x58f38ded
+    MIX_MULT_L = 0xca01f9dd
+    MIX_MULT_R = 0x4973f715
+    XSHIFT = 4 * 8 // 2
+    MASK32 = 0xFFFFFFFF
+
+    entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]
+    pool = [0] * 4
+
+    hash_const_A = INIT_A
+
+    def hash(value):
+        nonlocal hash_const_A
+        value = (value ^ hash_const_A) & MASK32
+        hash_const_A = (hash_const_A * MULT_A) & MASK32
+        value = (value * hash_const_A) & MASK32
+        value = (value ^ (value >> XSHIFT)) & MASK32
+        return value
+
+    def mix(x, y):
+        result_x = (MIX_MULT_L * x) & MASK32
+        result_y = (MIX_MULT_R * y) & MASK32
+        result = (result_x - result_y) & MASK32
+        result = (result ^ (result >> XSHIFT)) & MASK32
+        return result
+
+    # Add in the entropy to the pool.
+    for i in range(len(pool)):
+        pool[i] = hash(entropy[i])
+
+    # Mix all bits together so late bits can affect earlier bits.
+    for i_src in range(len(pool)):
+        for i_dst in range(len(pool)):
+            if i_src != i_dst:
+                pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
+
+    hash_const_B = INIT_B
+    state = []
+    for i_dst in range(4):
+        data_val = pool[i_dst]
+        data_val = (data_val ^ hash_const_B) & MASK32
+        hash_const_B = (hash_const_B * MULT_B) & MASK32
+        data_val = (data_val * hash_const_B) & MASK32
+        data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
+        state.append(data_val)
+    return state
+
+def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
+                 auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
+                 num_workers, persistent_workers, shared_seed):
+    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
+    # logic of this function.
+
+    try:
+        # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
+        # module's handlers are executed after Python returns from C low-level
+        # handlers, likely when the same fatal signal had already happened
+        # again.
+        # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
+        signal_handling._set_worker_signal_handlers()
+
+        torch.set_num_threads(1)
+        seed = base_seed + worker_id
+        random.seed(seed)
+        torch.manual_seed(seed)
+        if HAS_NUMPY:
+            np_seed = _generate_state(base_seed, worker_id)
+            import numpy as np
+            np.random.seed(np_seed)
+
+        from torch.utils.data import IterDataPipe
+        from torch.utils.data.graph_settings import apply_random_seed
+
+        shared_rng = torch.Generator()
+        if isinstance(dataset, IterDataPipe):
+            assert shared_seed is not None
+            shared_rng.manual_seed(shared_seed)
+            dataset = apply_random_seed(dataset, shared_rng)
+
+        global _worker_info
+        _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
+                                  seed=seed, dataset=dataset)
+
+        from torch.utils.data import _DatasetKind
+
+        init_exception = None
+
+        try:
+            if init_fn is not None:
+                init_fn(worker_id)
+
+            fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
+        except Exception:
+            init_exception = ExceptionWrapper(
+                where=f"in DataLoader worker process {worker_id}")
+
# SOFTWARE.
|
152 |
+
|
153 |
+
# This function generates an array of int32 as the seed for
|
154 |
+
# `numpy.random`, in order to prevent state collision due to same
|
155 |
+
# seed and algorithm for `numpy.random` and `random` modules.
|
156 |
+
# TODO: Implement `SeedSequence` like object for `torch.random`
|
157 |
+
def _generate_state(base_seed, worker_id):
|
158 |
+
INIT_A = 0x43b0d7e5
|
159 |
+
MULT_A = 0x931e8875
|
160 |
+
INIT_B = 0x8b51f9dd
|
161 |
+
MULT_B = 0x58f38ded
|
162 |
+
MIX_MULT_L = 0xca01f9dd
|
163 |
+
MIX_MULT_R = 0x4973f715
|
164 |
+
XSHIFT = 4 * 8 // 2
|
165 |
+
MASK32 = 0xFFFFFFFF
|
166 |
+
|
167 |
+
entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]
|
168 |
+
pool = [0] * 4
|
169 |
+
|
170 |
+
hash_const_A = INIT_A
|
171 |
+
|
172 |
+
def hash(value):
|
173 |
+
nonlocal hash_const_A
|
174 |
+
value = (value ^ hash_const_A) & MASK32
|
175 |
+
hash_const_A = (hash_const_A * MULT_A) & MASK32
|
176 |
+
value = (value * hash_const_A) & MASK32
|
177 |
+
value = (value ^ (value >> XSHIFT)) & MASK32
|
178 |
+
return value
|
179 |
+
|
180 |
+
def mix(x, y):
|
181 |
+
result_x = (MIX_MULT_L * x) & MASK32
|
182 |
+
result_y = (MIX_MULT_R * y) & MASK32
|
183 |
+
result = (result_x - result_y) & MASK32
|
184 |
+
result = (result ^ (result >> XSHIFT)) & MASK32
|
185 |
+
return result
|
186 |
+
|
187 |
+
# Add in the entropy to the pool.
|
188 |
+
for i in range(len(pool)):
|
189 |
+
pool[i] = hash(entropy[i])
|
190 |
+
|
191 |
+
# Mix all bits together so late bits can affect earlier bits.
|
192 |
+
for i_src in range(len(pool)):
|
193 |
+
for i_dst in range(len(pool)):
|
194 |
+
if i_src != i_dst:
|
195 |
+
pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
|
196 |
+
|
197 |
+
hash_const_B = INIT_B
|
198 |
+
state = []
|
199 |
+
for i_dst in range(4):
|
200 |
+
data_val = pool[i_dst]
|
201 |
+
data_val = (data_val ^ hash_const_B) & MASK32
|
202 |
+
hash_const_B = (hash_const_B * MULT_B) & MASK32
|
203 |
+
data_val = (data_val * hash_const_B) & MASK32
|
204 |
+
data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
|
205 |
+
state.append(data_val)
|
206 |
+
return state
|
207 |
+
|
208 |
+
def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
|
209 |
+
auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
|
210 |
+
num_workers, persistent_workers, shared_seed):
|
211 |
+
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
|
212 |
+
# logic of this function.
|
213 |
+
|
214 |
+
try:
|
215 |
+
# Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
|
216 |
+
# module's handlers are executed after Python returns from C low-level
|
217 |
+
# handlers, likely when the same fatal signal had already happened
|
218 |
+
# again.
|
219 |
+
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
|
220 |
+
signal_handling._set_worker_signal_handlers()
|
221 |
+
|
222 |
+
torch.set_num_threads(1)
|
223 |
+
seed = base_seed + worker_id
|
224 |
+
random.seed(seed)
|
225 |
+
torch.manual_seed(seed)
|
226 |
+
if HAS_NUMPY:
|
227 |
+
np_seed = _generate_state(base_seed, worker_id)
|
228 |
+
import numpy as np
|
229 |
+
np.random.seed(np_seed)
|
230 |
+
|
231 |
+
from torch.utils.data import IterDataPipe
|
232 |
+
from torch.utils.data.graph_settings import apply_random_seed
|
233 |
+
|
234 |
+
shared_rng = torch.Generator()
|
235 |
+
if isinstance(dataset, IterDataPipe):
|
236 |
+
assert shared_seed is not None
|
237 |
+
shared_rng.manual_seed(shared_seed)
|
238 |
+
dataset = apply_random_seed(dataset, shared_rng)
|
239 |
+
|
240 |
+
global _worker_info
|
241 |
+
_worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
|
242 |
+
seed=seed, dataset=dataset)
|
243 |
+
|
244 |
+
from torch.utils.data import _DatasetKind
|
245 |
+
|
246 |
+
init_exception = None
|
247 |
+
|
248 |
+
try:
|
249 |
+
if init_fn is not None:
|
250 |
+
init_fn(worker_id)
|
251 |
+
|
252 |
+
fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
|
253 |
+
except Exception:
|
254 |
+
init_exception = ExceptionWrapper(
|
255 |
+
where=f"in DataLoader worker process {worker_id}")
|
256 |
+
|
257 |
+
# When using Iterable mode, some worker can exit earlier than others due
|
258 |
+
# to the IterableDataset behaving differently for different workers.
|
259 |
+
# When such things happen, an `_IterableDatasetStopIteration` object is
|
260 |
+
# sent over to the main process with the ID of this worker, so that the
|
261 |
+
# main process won't send more tasks to this worker, and will send
|
262 |
+
# `None` to this worker to properly exit it.
|
263 |
+
#
|
264 |
+
# Note that we cannot set `done_event` from a worker as it is shared
|
265 |
+
# among all processes. Instead, we set the `iteration_end` flag to
|
266 |
+
# signify that the iterator is exhausted. When either `done_event` or
|
267 |
+
# `iteration_end` is set, we skip all processing step and just wait for
|
268 |
+
# `None`.
|
269 |
+
iteration_end = False
|
270 |
+
|
271 |
+
watchdog = ManagerWatchdog()
|
272 |
+
|
273 |
+
while watchdog.is_alive():
|
274 |
+
try:
|
275 |
+
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
|
276 |
+
except queue.Empty:
|
277 |
+
continue
|
278 |
+
if isinstance(r, _ResumeIteration):
|
279 |
+
# Acknowledge the main process
|
280 |
+
data_queue.put((r, None))
|
281 |
+
iteration_end = False
|
282 |
+
|
283 |
+
if isinstance(dataset, IterDataPipe):
|
284 |
+
assert r.seed is not None
|
285 |
+
shared_rng.manual_seed(r.seed)
|
286 |
+
dataset = apply_random_seed(dataset, shared_rng)
|
287 |
+
|
288 |
+
# Recreate the fetcher for worker-reuse policy
|
289 |
+
fetcher = _DatasetKind.create_fetcher(
|
290 |
+
dataset_kind, dataset, auto_collation, collate_fn, drop_last)
|
291 |
+
continue
|
292 |
+
elif r is None:
|
293 |
+
# Received the final signal
|
294 |
+
assert done_event.is_set() or iteration_end
|
295 |
+
break
|
296 |
+
elif done_event.is_set() or iteration_end:
|
297 |
+
# `done_event` is set. But I haven't received the final signal
|
298 |
+
# (None) yet. I will keep continuing until get it, and skip the
|
299 |
+
# processing steps.
|
300 |
+
continue
|
301 |
+
idx, index = r
|
302 |
+
data: Union[_IterableDatasetStopIteration, ExceptionWrapper]
|
303 |
+
if init_exception is not None:
|
304 |
+
data = init_exception
|
305 |
+
init_exception = None
|
306 |
+
else:
|
307 |
+
try:
|
308 |
+
data = fetcher.fetch(index) # type: ignore[possibly-undefined]
|
309 |
+
except Exception as e:
|
310 |
+
if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable:
|
311 |
+
data = _IterableDatasetStopIteration(worker_id)
|
312 |
+
# Set `iteration_end`
|
313 |
+
# (1) to save future `next(...)` calls, and
|
314 |
+
# (2) to avoid sending multiple `_IterableDatasetStopIteration`s.
|
315 |
+
iteration_end = True
|
316 |
+
else:
|
317 |
+
# It is important that we don't store exc_info in a variable.
|
318 |
+
# `ExceptionWrapper` does the correct thing.
|
319 |
+
# See NOTE [ Python Traceback Reference Cycle Problem ]
|
320 |
+
data = ExceptionWrapper(
|
321 |
+
where=f"in DataLoader worker process {worker_id}")
|
322 |
+
data_queue.put((idx, data))
|
323 |
+
del data, idx, index, r # save memory
|
324 |
+
except KeyboardInterrupt:
|
325 |
+
# Main process will raise KeyboardInterrupt anyways.
|
326 |
+
pass
|
327 |
+
if done_event.is_set():
|
328 |
+
data_queue.cancel_join_thread()
|
329 |
+
data_queue.close()
|
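The sharding note in `get_worker_info`'s docstring is easiest to see in action. Below is a minimal sketch (not part of the diff itself, and adapted from the pattern the docstring describes) of splitting an `IterableDataset` across workers by `worker_id`; `RangeDataset` and the concrete bounds are illustrative names, not anything defined in this file.

# Illustrative sketch: sharding an IterableDataset via get_worker_info().
import math
import torch
from torch.utils.data import DataLoader, IterableDataset, get_worker_info

class RangeDataset(IterableDataset):
    def __init__(self, start, end):
        self.start, self.end = start, end

    def __iter__(self):
        info = get_worker_info()
        if info is None:
            # Single-process loading: this process reads everything.
            lo, hi = self.start, self.end
        else:
            # Multi-process loading: give each worker a disjoint slice.
            per_worker = int(math.ceil((self.end - self.start) / info.num_workers))
            lo = self.start + info.id * per_worker
            hi = min(lo + per_worker, self.end)
        return iter(range(lo, hi))

# On spawn-based platforms this should run under `if __name__ == "__main__":`.
loader = DataLoader(RangeDataset(0, 10), num_workers=2)
print(list(loader))  # each element appears exactly once across the two workers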
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (281 Bytes)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc
ADDED
Binary file (6.07 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_hook_iterator.cpython-310.pyc
ADDED
Binary file (8.46 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (10.9 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc
ADDED
Binary file (16.1 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/gen_pyi.cpython-310.pyc
ADDED
Binary file (8.62 kB)
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py
ADDED
@@ -0,0 +1,64 @@
from torch.utils.data.datapipes.iter.utils import (
    IterableWrapperIterDataPipe as IterableWrapper,
)
from torch.utils.data.datapipes.iter.callable import (
    CollatorIterDataPipe as Collator,
    MapperIterDataPipe as Mapper,
)
from torch.utils.data.datapipes.iter.combinatorics import (
    SamplerIterDataPipe as Sampler,
    ShufflerIterDataPipe as Shuffler,
)
from torch.utils.data.datapipes.iter.combining import (
    ConcaterIterDataPipe as Concater,
    DemultiplexerIterDataPipe as Demultiplexer,
    ForkerIterDataPipe as Forker,
    MultiplexerIterDataPipe as Multiplexer,
    ZipperIterDataPipe as Zipper,
)
from torch.utils.data.datapipes.iter.filelister import (
    FileListerIterDataPipe as FileLister,
)
from torch.utils.data.datapipes.iter.fileopener import (
    FileOpenerIterDataPipe as FileOpener,
)
from torch.utils.data.datapipes.iter.grouping import (
    BatcherIterDataPipe as Batcher,
    GrouperIterDataPipe as Grouper,
    UnBatcherIterDataPipe as UnBatcher,
)
from torch.utils.data.datapipes.iter.sharding import (
    ShardingFilterIterDataPipe as ShardingFilter,
)
from torch.utils.data.datapipes.iter.routeddecoder import (
    RoutedDecoderIterDataPipe as RoutedDecoder,
)
from torch.utils.data.datapipes.iter.selecting import (
    FilterIterDataPipe as Filter,
)
from torch.utils.data.datapipes.iter.streamreader import (
    StreamReaderIterDataPipe as StreamReader,
)

__all__ = ['Batcher',
           'Collator',
           'Concater',
           'Demultiplexer',
           'FileLister',
           'FileOpener',
           'Filter',
           'Forker',
           'Grouper',
           'IterableWrapper',
           'Mapper',
           'Multiplexer',
           'RoutedDecoder',
           'Sampler',
           'ShardingFilter',
           'Shuffler',
           'StreamReader',
           'UnBatcher',
           'Zipper']

# Please keep this list sorted
assert __all__ == sorted(__all__)
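Because each of the classes re-exported above is registered through `functional_datapipe` (in its defining module), the short aliases can also be chained as methods on any `IterDataPipe`. A minimal sketch of that functional form, assuming a torch build where these datapipes are available:

# Illustrative sketch: chained functional forms enabled by the registrations.
from torch.utils.data.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10)).shuffle(buffer_size=10).batch(batch_size=3)
for chunk in dp:
    print(chunk)  # DataChunk lists of up to 3 shuffled elements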
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.78 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc
ADDED
Binary file (7.83 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combinatorics.cpython-310.pyc
ADDED
Binary file (6.45 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combining.cpython-310.pyc
ADDED
Binary file (25.2 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/filelister.cpython-310.pyc
ADDED
Binary file (2.89 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/fileopener.cpython-310.pyc
ADDED
Binary file (2.85 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/grouping.cpython-310.pyc
ADDED
Binary file (10.9 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/routeddecoder.cpython-310.pyc
ADDED
Binary file (3.23 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc
ADDED
Binary file (3.42 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/sharding.cpython-310.pyc
ADDED
Binary file (3.41 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/streamreader.cpython-310.pyc
ADDED
Binary file (1.81 kB)

venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (1.99 kB)
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combinatorics.py
ADDED
@@ -0,0 +1,183 @@
import random
import torch

from torch.utils.data import Sampler, SequentialSampler
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from typing import Dict, Iterator, List, Optional, Sized, Tuple, Type, TypeVar

__all__ = [
    "SamplerIterDataPipe",
    "ShufflerIterDataPipe",
]

T_co = TypeVar('T_co', covariant=True)


class SamplerIterDataPipe(IterDataPipe[T_co]):
    r"""
    Generate sample elements using the provided ``Sampler`` (defaults to :class:`SequentialSampler`).

    Args:
        datapipe: IterDataPipe to sample from
        sampler: Sampler class to generate sample elements from input DataPipe.
            Default is :class:`SequentialSampler` for IterDataPipe
    """

    datapipe: IterDataPipe
    sampler: Sampler

    def __init__(self,
                 datapipe: IterDataPipe,
                 sampler: Type[Sampler] = SequentialSampler,
                 sampler_args: Optional[Tuple] = None,
                 sampler_kwargs: Optional[Dict] = None
                 ) -> None:
        assert isinstance(datapipe, Sized), \
            "Sampler class requires input datapipe implemented `__len__`"
        super().__init__()
        self.datapipe = datapipe
        self.sampler_args = () if sampler_args is None else sampler_args
        self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
        # https://github.com/python/mypy/pull/9629 will solve
        self.sampler = sampler(*self.sampler_args, data_source=self.datapipe, **self.sampler_kwargs)  # type: ignore[misc]

    def __iter__(self) -> Iterator[T_co]:
        return iter(self.sampler)

    def __len__(self) -> int:
        # Dataset has been tested as `Sized`
        if isinstance(self.sampler, Sized):
            return len(self.sampler)
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")


@functional_datapipe('shuffle')
class ShufflerIterDataPipe(IterDataPipe[T_co]):
    r"""
    Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).

    The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
    each item will be yielded from the buffer by reservoir sampling via iterator.

    ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
    datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
    ``buffer_size`` is required to be greater than or equal to the size of datapipe.

    When it is used with :class:`torch.utils.data.DataLoader`, the methods to
    set up random seed are different based on :attr:`num_workers`.

    For single-process mode (:attr:`num_workers == 0`), the random seed is set before
    the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
    mode (:attr:`num_workers > 0`), ``worker_init_fn`` is used to set up a random seed
    for each worker process.

    Args:
        datapipe: The IterDataPipe being shuffled
        buffer_size: The buffer size for shuffling (default to ``10000``)
        unbatch_level: Specifies if it is necessary to unbatch source data before
            applying the shuffle

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> dp = IterableWrapper(range(10))
        >>> shuffle_dp = dp.shuffle()
        >>> list(shuffle_dp)
        [0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
    """

    datapipe: IterDataPipe[T_co]
    buffer_size: int
    _buffer: List[T_co]
    _enabled: bool
    _seed: Optional[int]
    _rng: random.Random

    def __init__(self,
                 datapipe: IterDataPipe[T_co],
                 *,
                 buffer_size: int = 10000,
                 unbatch_level: int = 0
                 ) -> None:
        super().__init__()
        # TODO: Performance optimization
        #       buffer can be a fixed size and remove expensive `append()` and `len()` operations
        self._buffer: List[T_co] = []
        assert buffer_size > 0, "buffer_size should be larger than 0"
        if unbatch_level == 0:
            self.datapipe = datapipe
        else:
            self.datapipe = datapipe.unbatch(unbatch_level=unbatch_level)
        self.buffer_size = buffer_size
        self._enabled = True
        self._seed = None
        self._rng = random.Random()

    def set_shuffle(self, shuffle=True):
        self._enabled = shuffle
        return self

    def set_seed(self, seed: int):
        self._seed = seed
        return self

    def __iter__(self) -> Iterator[T_co]:
        if not self._enabled:
            yield from self.datapipe
        else:
            for x in self.datapipe:
                if len(self._buffer) == self.buffer_size:
                    idx = self._rng.randint(0, len(self._buffer) - 1)
                    val, self._buffer[idx] = self._buffer[idx], x
                    yield val
                else:
                    self._buffer.append(x)
            while self._buffer:
                idx = self._rng.randint(0, len(self._buffer) - 1)
                yield self._buffer.pop(idx)

    def __len__(self) -> int:
        if isinstance(self.datapipe, Sized):
            return len(self.datapipe)
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")

    def reset(self) -> None:
        self._buffer = []
        if self._enabled:
            if self._seed is None:
                self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
            self._rng.seed(self._seed)
            self._seed = None

    def __getstate__(self):
        state = (
            self.datapipe,
            self.buffer_size,
            self._enabled,
            self._seed,
            self._buffer,
            self._rng.getstate(),
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.datapipe,
            self.buffer_size,
            self._enabled,
            self._seed,
            self._buffer,
            rng_state,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self._rng = random.Random()
        self._rng.setstate(rng_state)

    def __del__(self):
        self._buffer.clear()
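One subtlety worth illustrating: `reset()` re-seeds `_rng` from `_seed` and then clears `_seed`, so a seed set via `set_seed()` only applies to the next pass over the pipe. A minimal sketch of pinning the shuffle order across two passes; it assumes the datapipe reset hook fires when a new iterator is created, as in recent torch versions:

# Illustrative sketch: deterministic shuffling via set_seed().
from torch.utils.data.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10)).shuffle(buffer_size=10)
first = list(dp.set_seed(42))    # reset() consumes the seed here
second = list(dp.set_seed(42))   # so it must be set again for the next pass
assert first == second           # same seed gives the same sampling order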
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py
ADDED
@@ -0,0 +1,639 @@
import warnings

from abc import ABC, abstractmethod
from collections import deque
import copy as copymodule
from typing import Any, Callable, Iterator, List, Literal, Optional, Sized, Tuple, TypeVar, Deque

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import StreamWrapper, _check_unpickable_fn

__all__ = [
    "ConcaterIterDataPipe",
    "DemultiplexerIterDataPipe",
    "ForkerIterDataPipe",
    "MultiplexerIterDataPipe",
    "ZipperIterDataPipe",
]

T_co = TypeVar('T_co', covariant=True)


@functional_datapipe('concat')
class ConcaterIterDataPipe(IterDataPipe):
    r"""
    Concatenates multiple Iterable DataPipes (functional name: ``concat``).

    The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones.

    Args:
        datapipes: Iterable DataPipes being concatenated

    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> import random
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> dp1 = IterableWrapper(range(3))
        >>> dp2 = IterableWrapper(range(5))
        >>> list(dp1.concat(dp2))
        [0, 1, 2, 0, 1, 2, 3, 4]
    """

    datapipes: Tuple[IterDataPipe]

    def __init__(self, *datapipes: IterDataPipe):
        if len(datapipes) == 0:
            raise ValueError("Expected at least one DataPipe, but got nothing")
        if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
            raise TypeError("Expected all inputs to be `IterDataPipe`")
        self.datapipes = datapipes  # type: ignore[assignment]

    def __iter__(self) -> Iterator:
        for dp in self.datapipes:
            yield from dp

    def __len__(self) -> int:
        if all(isinstance(dp, Sized) for dp in self.datapipes):
            return sum(len(dp) for dp in self.datapipes)
        else:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")


@functional_datapipe('fork')
class ForkerIterDataPipe(IterDataPipe):
    r"""
    Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``).

    Args:
        datapipe: Iterable DataPipe being copied
        num_instances: number of instances of the datapipe to create
        buffer_size: this restricts how far ahead the leading child DataPipe
            can read relative to the slowest child DataPipe.
            Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
        copy: copy strategy to use for items yielded by each branch. Supported
            options are ``None`` for no copying, ``"shallow"`` for shallow object
            copies, and ``"deep"`` for deep object copies. Defaults to ``None``.

    Note:
        All branches of the forked pipeline return the identical object unless
        the copy parameter is supplied. If the object is mutable or contains
        mutable objects, changing them in one branch will affect all others.

    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> source_dp = IterableWrapper(range(5))
        >>> dp1, dp2 = source_dp.fork(num_instances=2)
        >>> list(dp1)
        [0, 1, 2, 3, 4]
        >>> list(dp2)
        [0, 1, 2, 3, 4]
    """

    def __new__(
        cls,
        datapipe: IterDataPipe,
        num_instances: int,
        buffer_size: int = 1000,
        copy: Optional[Literal["shallow", "deep"]] = None
    ):
        if num_instances < 1:
            raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found")
        if num_instances == 1:
            return datapipe
        container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy)  # type: ignore[abstract]
        return [_ChildDataPipe(container, i) for i in range(num_instances)]


class _ContainerTemplate(ABC):
    r"""Abstract class for container ``DataPipes``. The followings are three required methods."""

    @abstractmethod
    def get_next_element_by_instance(self, instance_id: int):
        ...

    @abstractmethod
    def is_every_instance_exhausted(self) -> bool:
        ...

    @abstractmethod
    def reset(self) -> None:
        ...

    @abstractmethod
    def get_length_by_instance(self, instance_id: int):
        r"""Raise TypeError if it's not supposed to be implemented to support `list(datapipe)`."""


def _no_op(x):
    return x


class _ForkerIterDataPipe(IterDataPipe, _ContainerTemplate):
    r"""
    Container to hold instance-specific information on behalf of ForkerIterDataPipe.

    It tracks the state of its child DataPipes, maintains the buffer, and yields the next value
    as requested by the child DataPipes.
    """

    def __init__(
        self,
        datapipe: IterDataPipe,
        num_instances: int,
        buffer_size: int = 1000,
        copy: Optional[Literal["shallow", "deep"]] = None
    ):
        self.main_datapipe = datapipe
        self._datapipe_iterator: Optional[Iterator[Any]] = None
        self.num_instances = num_instances
        self.buffer: Deque = deque()
        self.buffer_size = buffer_size
        if self.buffer_size < 0:
            warnings.warn(
                "Unlimited buffer size is set for `fork`, "
                "please be aware of OOM at random places",
                UserWarning
            )
        if copy is None:
            self.copy_fn = _no_op
        elif copy == "shallow":
            self.copy_fn = copymodule.copy
        elif copy == "deep":
            self.copy_fn = copymodule.deepcopy
        else:
            raise ValueError(f"Unknown copy method `{copy}` requested, choose one of None, `shallow` or `deep`.")

        self.child_pointers: List[int] = [0] * num_instances  # Indicate the indices of the next element to get
        self.slowest_ptr = 0  # The index to read by the slowest child
        self.leading_ptr = 0  # The index to read by the fastest child
        self.end_ptr: Optional[int] = None  # The index to stop child
        self._child_stop: List[bool] = [True for _ in range(num_instances)]

    def __len__(self):
        return len(self.main_datapipe)

    def get_next_element_by_instance(self, instance_id: int):
        if self._datapipe_iterator is None and self._child_stop[instance_id]:
            self._datapipe_iterator = iter(self.main_datapipe)
            self._snapshot_state = _SnapshotState.Iterating
            for i in range(self.num_instances):
                self._child_stop[i] = False
        try:
            while not self._child_stop[instance_id]:
                self.child_pointers[instance_id] += 1
                if self.end_ptr is not None and self.child_pointers[instance_id] == self.end_ptr:
                    self._child_stop[instance_id] = True
                    break
                # Use buffer
                if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr:
                    idx = self.child_pointers[instance_id] - self.slowest_ptr - 1
                    return_val = self.buffer[idx]
                else:  # Retrieve one element from main datapipe
                    self.leading_ptr = self.child_pointers[instance_id]
                    try:
                        return_val = next(self._datapipe_iterator)  # type: ignore[arg-type]
                        self.buffer.append(return_val)
                    except StopIteration:
                        self._child_stop[instance_id] = True
                        self._datapipe_iterator = None
                        self.end_ptr = self.leading_ptr
                        continue
                if self.child_pointers[instance_id] == self.slowest_ptr + 1:
                    new_min = min(self.child_pointers)  # Can optimize by avoiding the call to min()
                    if self.slowest_ptr < new_min:
                        self.slowest_ptr = new_min
                        self.buffer.popleft()
                if self.buffer_size >= 0 and self.leading_ptr > self.buffer_size + self.slowest_ptr:
                    raise BufferError("ForkerIterDataPipe buffer overflow, " +
                                      f"buffer size {self.buffer_size} is insufficient.")

                yield self.copy_fn(return_val)  # type: ignore[possibly-undefined]
        finally:
            self._child_stop[instance_id] = True
            # Cleanup _datapipe_iterator for the case that fork exits earlier
            if all(self._child_stop):
                self._datapipe_iterator = None
                self._cleanup()

    def is_every_instance_exhausted(self) -> bool:
        return self.end_ptr is not None and all(self._child_stop)

    def get_length_by_instance(self, instance_id: int) -> int:
        return len(self.main_datapipe)

    def reset(self) -> None:
        self._datapipe_iterator = None
        self.buffer = deque()
        self.child_pointers = [0] * self.num_instances
        self.slowest_ptr = 0
        self.leading_ptr = 0
        self.end_ptr = None
        self._child_stop = [True for _ in range(self.num_instances)]

    def __getstate__(self):
        state = (
            self.main_datapipe,
            self.num_instances,
            self.buffer_size,
            self.copy_fn,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.main_datapipe,
            self.num_instances,
            self.buffer_size,
            self.copy_fn,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self._datapipe_iterator = None
        self.buffer = deque()
        self.child_pointers = [0] * self.num_instances
        self.slowest_ptr = 0
        self.leading_ptr = 0
        self.end_ptr = None
        self._child_stop = [True for _ in range(self.num_instances)]

    def _cleanup(self):
        while self.buffer:
            d = self.buffer.popleft()
            StreamWrapper.close_streams(d)

    def __del__(self):
        self._cleanup()


class _ChildDataPipe(IterDataPipe):
    r"""
    Iterable Datapipe that is a child of a main DataPipe.

    The instance of this class will pass its instance_id to get the next value from its main DataPipe.

    Note:
        ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint.
        Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes,
        the previous iterators for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe
        hasn't had an iterator created from it since the last invalidation. See the example below.

    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> # Single Iterator per IterDataPipe Invalidation
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> source_dp = IterableWrapper(range(10))
        >>> cdp1, cdp2 = source_dp.fork(num_instances=2)
        >>> it1, it2 = iter(cdp1), iter(cdp2)
        >>> it3 = iter(cdp1)
        >>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`.
        >>> it4 = iter(cdp2)
        >>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since
        >>> # the last invalidation.

    Args:
        main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)'
        instance_id: integer identifier of this instance
    """

    _is_child_datapipe: bool = True

    def __init__(self, main_datapipe: IterDataPipe, instance_id: int):
        assert isinstance(main_datapipe, _ContainerTemplate)

        self.main_datapipe: IterDataPipe = main_datapipe
        self.instance_id = instance_id

    def __iter__(self):
        # Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator`
        # We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called
        return self.main_datapipe.get_next_element_by_instance(self.instance_id)

    def __len__(self):
        return self.main_datapipe.get_length_by_instance(self.instance_id)

    # This method is called by `hook_iterator` in `_typing.py`.
    def _set_main_datapipe_valid_iterator_id(self) -> int:
        r"""
        Update the valid iterator ID for both this DataPipe object and `main_datapipe`.

        `main_datapipe.reset()` is called when the ID is incremented to a new generation.
        """
        # 1. First time any child iterator is created
        if self.main_datapipe._valid_iterator_id is None:
            self.main_datapipe._valid_iterator_id = 0  # type: ignore[attr-defined]
        # 2. This instance was already in the same generation as `main_datapipe`,
        #    we need to increment the ID further by 1
        elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id:  # type: ignore[has-type]
            self.main_datapipe._valid_iterator_id += 1  # type: ignore[attr-defined]
            # Whenever a new generation of iterator is created, the `main_datapipe` must reset
            if not self.main_datapipe.is_every_instance_exhausted():
                warnings.warn("Some child DataPipes are not exhausted when __iter__ is called. We are resetting "
                              "the buffer and each child DataPipe will read from the start again.", UserWarning)
            self.main_datapipe.reset()
        # 3. Otherwise, the iterator is behind the others, so it will just need to catch up by setting
        #    the instance's iterator to match that of `main_datapipe`
        self._valid_iterator_id = self.main_datapipe._valid_iterator_id
        return self._valid_iterator_id

    # This method is called by `hook_iterator` in `_typing.py`.
    def _check_valid_iterator_id(self, iterator_id) -> bool:
        r"""Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`."""
        return iterator_id == self._valid_iterator_id and iterator_id == self.main_datapipe._valid_iterator_id


@functional_datapipe('demux')
class DemultiplexerIterDataPipe(IterDataPipe):
    r"""
    Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``).

    A list of the child DataPipes is returned from this operation.

    Args:
        datapipe: Iterable DataPipe being filtered
        num_instances: number of instances of the DataPipe to create
        classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None``
        drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None``
        buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
            DataPipes while waiting for their values to be yielded.
            Defaults to ``1000``. Use ``-1`` for the unlimited buffer.

    Examples:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> def odd_or_even(n):
        ...     return n % 2
        >>> source_dp = IterableWrapper(range(5))
        >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
        >>> list(dp1)
        [0, 2, 4]
        >>> list(dp2)
        [1, 3]
        >>> # It can also filter out any element that gets `None` from the `classifier_fn`
        >>> def odd_or_even_no_zero(n):
        ...     return n % 2 if n != 0 else None
        >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
        >>> list(dp1)
        [2, 4]
        >>> list(dp2)
        [1, 3]
    """

    def __new__(cls, datapipe: IterDataPipe, num_instances: int,
                classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000):
        if num_instances < 1:
            raise ValueError(f"Expected `num_instances` larger than 0, but {num_instances} is found")

        _check_unpickable_fn(classifier_fn)

        # When num_instances == 1, demux can be replaced by filter,
        # but keep it as Demultiplexer for the sake of consistency,
        # like throwing an Error when the classification result is out of range
        container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size)  # type: ignore[abstract]
        return [_ChildDataPipe(container, i) for i in range(num_instances)]


class _DemultiplexerIterDataPipe(IterDataPipe, _ContainerTemplate):
    r"""
    Container to hold instance-specific information on behalf of DemultiplexerIterDataPipe.

    It tracks the state of its child DataPipes, maintains the buffer, classifies and yields the next correct value
    as requested by the child DataPipes.
    """

    def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int,
                 classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool, buffer_size: int):
        self.main_datapipe = datapipe
        self._datapipe_iterator: Optional[Iterator[Any]] = None
        self.num_instances = num_instances
        self.buffer_size = buffer_size
        if self.buffer_size < 0:
            warnings.warn(
                "Unlimited buffer size is set for `demux`, "
                "please be aware of OOM at random places",
                UserWarning
            )
        self.current_buffer_usage = 0
        self.child_buffers: List[Deque[T_co]] = [deque() for _ in range(num_instances)]
        self.classifier_fn = classifier_fn
        self.drop_none = drop_none
        self.main_datapipe_exhausted = False
        self._child_stop: List[bool] = [True for _ in range(num_instances)]

    def _find_next(self, instance_id: int) -> T_co:  # type: ignore[type-var]
        while True:
            if self.main_datapipe_exhausted or self._child_stop[instance_id]:
                raise StopIteration
            if self._datapipe_iterator is None:
                raise ValueError(
                    "_datapipe_iterator has not been set, likely because this private method is called directly "
                    "without invoking get_next_element_by_instance() first.")
            value = next(self._datapipe_iterator)
            classification = self.classifier_fn(value)
            if classification is None and self.drop_none:
                StreamWrapper.close_streams(value)
                continue
            if classification is None or classification >= self.num_instances or classification < 0:
                raise ValueError(f"Output of the classification fn should be between 0 and {self.num_instances - 1}. " +
                                 f"{classification} is returned.")
            if classification == instance_id:
                return value
            self.child_buffers[classification].append(value)
            self.current_buffer_usage += 1
            if self.buffer_size >= 0 and self.current_buffer_usage > self.buffer_size:
                raise BufferError(
                    f"DemultiplexerIterDataPipe buffer overflow, buffer size {self.buffer_size} is insufficient.")

    def get_next_element_by_instance(self, instance_id: int):
        if self._datapipe_iterator is None and self._child_stop[instance_id]:
            self._datapipe_iterator = iter(self.main_datapipe)
            self._snapshot_state = _SnapshotState.Iterating  # This is necessary for the DataPipe to reset properly.
            self.main_datapipe_exhausted = False
            for i in range(self.num_instances):
                self._child_stop[i] = False

        try:
            while not self._child_stop[instance_id]:
                if self.child_buffers[instance_id]:
                    self.current_buffer_usage -= 1
                    yield self.child_buffers[instance_id].popleft()
                else:
                    try:
                        yield self._find_next(instance_id)
                    except StopIteration:
                        self._child_stop[instance_id] = True
                        self.main_datapipe_exhausted = True
                        self._datapipe_iterator = None
        finally:
            self._child_stop[instance_id] = True
            # Cleanup _datapipe_iterator for the case that demux exits earlier
            if all(self._child_stop):
                self._datapipe_iterator = None
            if self.child_buffers[instance_id]:
                self._cleanup(instance_id)

    def is_every_instance_exhausted(self) -> bool:
        return self.main_datapipe_exhausted and all(self._child_stop)

    def get_length_by_instance(self, instance_id: int) -> int:
        raise TypeError

    def reset(self) -> None:
        self._datapipe_iterator = None
        self.current_buffer_usage = 0
        self.child_buffers = [deque() for _ in range(self.num_instances)]
        self._child_stop = [True for _ in range(self.num_instances)]
        self.main_datapipe_exhausted = False

    def __getstate__(self):
        state = (
            self.main_datapipe,
            self.num_instances,
            self.buffer_size,
            self.classifier_fn,
            self.drop_none,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.main_datapipe,
            self.num_instances,
            self.buffer_size,
            self.classifier_fn,
            self.drop_none,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self._datapipe_iterator = None
        self.current_buffer_usage = 0
        self.child_buffers = [deque() for _ in range(self.num_instances)]
        self._child_stop = [True for _ in range(self.num_instances)]
        self.main_datapipe_exhausted = False

    def _cleanup(self, instance_id: Optional[int] = None):
        ids = range(self.num_instances) if instance_id is None else [instance_id, ]
        for i in ids:
            q = self.child_buffers[i]
            while q:
                d = q.popleft()
                StreamWrapper.close_streams(d)

    def __del__(self):
        self._cleanup()


@functional_datapipe('mux')
class MultiplexerIterDataPipe(IterDataPipe):
    r"""
    Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``).

    As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
    and so on. It ends when the shortest input DataPipe is exhausted.

    Args:
        datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted

    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
        >>> list(dp1.mux(dp2, dp3))
        [0, 10, 20, 1, 11, 21, 2, 12, 22]
    """

    def __init__(self, *datapipes):
        self.datapipes = datapipes
        self.buffer: List = []  # Store values to be yielded only when every iterator provides one

    def __iter__(self):
        iterators = [iter(x) for x in self.datapipes]
        while len(iterators):
            for it in iterators:
                try:
                    value = next(it)
                    self.buffer.append(value)
                except StopIteration:
                    self.buffer.clear()
                    return
            yield from self.buffer
            self.buffer.clear()

    def __len__(self):
        if all(isinstance(dp, Sized) for dp in self.datapipes):
            return min(len(dp) for dp in self.datapipes) * len(self.datapipes)
        else:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")

    def reset(self) -> None:
        self.buffer = []

    def __getstate__(self):
        state = (
            self.datapipes,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.datapipes,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self.buffer = []

    def __del__(self):
        self.buffer.clear()


@functional_datapipe('zip')
class ZipperIterDataPipe(IterDataPipe[Tuple[T_co]]):
    r"""
    Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).

    The output is stopped as soon as the shortest input DataPipe is exhausted.

    Args:
        *datapipes: Iterable DataPipes being aggregated

    Example:
        >>> # xdoctest: +REQUIRES(module:torchdata)
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
        >>> list(dp1.zip(dp2, dp3))
        [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]
    """

    datapipes: Tuple[IterDataPipe]

    def __init__(self, *datapipes: IterDataPipe):
        if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
            raise TypeError("All inputs are required to be `IterDataPipe` "
                            "for `ZipIterDataPipe`.")
        super().__init__()
        self.datapipes = datapipes  # type: ignore[assignment]

    def __iter__(self) -> Iterator[Tuple[T_co]]:
        iterators = [iter(datapipe) for datapipe in self.datapipes]
        yield from zip(*iterators)

    def __len__(self) -> int:
        if all(isinstance(dp, Sized) for dp in self.datapipes):
            return min(len(dp) for dp in self.datapipes)
        else:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
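The mutation caveat in `ForkerIterDataPipe`'s Note is concrete enough to demonstrate. A minimal sketch, assuming the datapipes above are importable from `torch.utils.data.datapipes.iter`:

# Illustrative sketch: fork's copy semantics.
from torch.utils.data.datapipes.iter import IterableWrapper

src = IterableWrapper([[1], [2], [3]])
a, b = src.fork(num_instances=2, copy="shallow")
for item in a:
    item.append(99)  # mutate what branch `a` yields
print(list(b))       # [[1], [2], [3]]: with copy="shallow", branch `b` is unaffected
# With the default copy=None, both branches would yield the identical list
# objects, and the mutation above would leak into branch `b`.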
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/filelister.py
ADDED
@@ -0,0 +1,66 @@
from typing import Iterator, List, Sequence, Union

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root

__all__ = ["FileListerIterDataPipe", ]


@functional_datapipe("list_files")
class FileListerIterDataPipe(IterDataPipe[str]):
    r"""
    Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.

    Multiple root directories can be provided (functional name: ``list_files``).

    Args:
        root: Root directory or a sequence of root directories
        masks: Unix style filter string or string list for filtering file name(s)
        recursive: Whether to return pathname from nested directories or not
        abspath: Whether to return relative pathname or absolute pathname
        non_deterministic: Whether to return pathname in sorted order or not.
            If ``False``, the results yielded from each root directory will be sorted
        length: Nominal length of the datapipe

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.iter import FileLister
        >>> dp = FileLister(root=".", recursive=True)
        >>> list(dp)
        ['example.py', './data/data.tar']
    """

    def __init__(
        self,
        root: Union[str, Sequence[str], IterDataPipe] = '.',
        masks: Union[str, List[str]] = '',
        *,
        recursive: bool = False,
        abspath: bool = False,
        non_deterministic: bool = False,
        length: int = -1
    ) -> None:
        super().__init__()
        if isinstance(root, str):
            root = [root, ]
        if not isinstance(root, IterDataPipe):
            root = IterableWrapper(root)
        self.datapipe: IterDataPipe = root
        self.masks: Union[str, List[str]] = masks
        self.recursive: bool = recursive
        self.abspath: bool = abspath
        self.non_deterministic: bool = non_deterministic
        self.length: int = length

    def __iter__(self) -> Iterator[str]:
        for path in self.datapipe:
            yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)

    def __len__(self):
        if self.length == -1:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
        return self.length
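`FileLister` usually sits at the head of a file pipeline, feeding pathnames into `FileOpener` (defined in the sibling `fileopener.py`). A minimal sketch of that pairing; the mask, directory, and text mode are placeholder assumptions:

# Illustrative sketch: listing files and opening them as streams.
from torch.utils.data.datapipes.iter import FileLister, FileOpener

files = FileLister(root=".", masks="*.txt")     # pathnames under the current dir
opened = FileOpener(files, mode="t")            # yields (pathname, stream) tuples
for path, stream in opened:
    print(path, len(stream.read()))
    stream.close()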
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/grouping.py
ADDED
@@ -0,0 +1,300 @@
1 |
+
import warnings
|
2 |
+
from collections import defaultdict
|
3 |
+
from typing import Any, Callable, DefaultDict, Iterator, List, Optional, Sized, TypeVar
|
4 |
+
|
5 |
+
import torch.utils.data.datapipes.iter.sharding
|
6 |
+
|
7 |
+
from torch.utils.data.datapipes._decorator import functional_datapipe
|
8 |
+
from torch.utils.data.datapipes.datapipe import DataChunk, IterDataPipe
|
9 |
+
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
|
10 |
+
|
11 |
+
__all__ = [
|
12 |
+
"BatcherIterDataPipe",
|
13 |
+
"GrouperIterDataPipe",
|
14 |
+
"UnBatcherIterDataPipe",
|
15 |
+
]
|
16 |
+
|
17 |
+
T_co = TypeVar("T_co", covariant=True)
|
18 |
+
|
19 |
+
def __getattr__(name: str):
|
20 |
+
if name in ["SHARDING_PRIORITIES", "ShardingFilterIterDataPipe"]:
|
21 |
+
warnings.warn(f"`{name}` from `torch.utils.data.datapipes.iter.grouping` is going to be removed in PyTorch 2.1"
|
22 |
+
f"Please use `{name}` from the `torch.utils.data.datapipes.iter.sharding`",
|
23 |
+
category=FutureWarning, stacklevel=2)
|
24 |
+
|
25 |
+
return getattr(torch.utils.data.datapipes.iter.sharding, name)
|
26 |
+
|
27 |
+
raise AttributeError(f"module {__name__} has no attribute {name}")
|
28 |
+
|
@functional_datapipe('batch')
class BatcherIterDataPipe(IterDataPipe[DataChunk]):
    r"""
    Creates mini-batches of data (functional name: ``batch``).

    An outer dimension of size ``batch_size`` will be added; if ``drop_last`` is set to
    ``False``, the last batch may instead be of size ``length % batch_size``.

    Args:
        datapipe: Iterable DataPipe being batched
        batch_size: The size of each batch
        drop_last: Option to drop the last batch if it's not full
        wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
            defaults to ``DataChunk``

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> dp = IterableWrapper(range(10))
        >>> dp = dp.batch(batch_size=3, drop_last=True)
        >>> list(dp)
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    datapipe: IterDataPipe
    batch_size: int
    drop_last: bool

    def __init__(self,
                 datapipe: IterDataPipe,
                 batch_size: int,
                 drop_last: bool = False,
                 wrapper_class=DataChunk,
                 ) -> None:
        assert batch_size > 0, "Batch size is required to be larger than 0!"
        super().__init__()
        self.datapipe = datapipe
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.wrapper_class = wrapper_class

    def __iter__(self) -> Iterator[DataChunk]:
        batch: List = []
        for x in self.datapipe:
            batch.append(x)
            if len(batch) == self.batch_size:
                yield self.wrapper_class(batch)
                batch = []
        if len(batch) > 0:
            if not self.drop_last:
                yield self.wrapper_class(batch)

    def __len__(self) -> int:
        if isinstance(self.datapipe, Sized):
            if self.drop_last:
                return len(self.datapipe) // self.batch_size
            else:
                return (len(self.datapipe) + self.batch_size - 1) // self.batch_size
        else:
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")

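A quick check of the ``__len__`` arithmetic above, a sketch using the in-tree ``IterableWrapper`` (equivalent to the ``torchdata`` import in the docstring); the ceiling division ``(n + batch_size - 1) // batch_size`` accounts for the partial batch kept when ``drop_last`` is ``False``:

from torch.utils.data.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10)).batch(batch_size=3)
assert len(dp) == 4                # ceil(10 / 3), partial batch kept
assert list(dp)[-1] == [9]         # DataChunk is a list subclass, so == list works

dp = IterableWrapper(range(10)).batch(batch_size=3, drop_last=True)
assert len(dp) == 3                # 10 // 3, trailing [9] dropped
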
@functional_datapipe('unbatch')
class UnBatcherIterDataPipe(IterDataPipe):
    r"""
    Undoes batching of data (functional name: ``unbatch``).

    In other words, it flattens the data up to the specified level within a batched DataPipe.

    Args:
        datapipe: Iterable DataPipe being un-batched
        unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
            it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.

    Example:
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
        >>> dp1 = source_dp.unbatch()
        >>> list(dp1)
        [[0, 1], [2], [3, 4], [5], [6]]
        >>> dp2 = source_dp.unbatch(unbatch_level=2)
        >>> list(dp2)
        [0, 1, 2, 3, 4, 5, 6]
    """

    def __init__(self,
                 datapipe: IterDataPipe,
                 unbatch_level: int = 1):
        self.datapipe = datapipe
        self.unbatch_level = unbatch_level

    def __iter__(self):
        for element in self.datapipe:
            yield from self._dive(element, unbatch_level=self.unbatch_level)

    def _dive(self, element, unbatch_level):
        if unbatch_level < -1:
            raise ValueError("unbatch_level must be -1 or >= 0")
        if unbatch_level == -1:
            if isinstance(element, (list, DataChunk)):
                for item in element:
                    yield from self._dive(item, unbatch_level=-1)
            else:
                yield element
        elif unbatch_level == 0:
            yield element
        else:
            if isinstance(element, (list, DataChunk)):
                for item in element:
                    yield from self._dive(item, unbatch_level=unbatch_level - 1)
            else:
                raise IndexError(f"unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe")

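The recursion in ``_dive`` bottoms out differently for ``-1`` (flatten until a non-list leaf) versus a positive level (descend exactly that many list layers, erroring if the data is shallower). A minimal sketch:

from torch.utils.data.datapipes.iter import IterableWrapper

source = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]]])
assert list(source.unbatch()) == [[0, 1], [2], [3, 4], [5]]           # one level
assert list(source.unbatch(unbatch_level=-1)) == [0, 1, 2, 3, 4, 5]   # flatten fully
try:
    list(source.unbatch(unbatch_level=3))
except IndexError:
    pass  # level deeper than the data, per _dive's else branch
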
@functional_datapipe('groupby')
class GrouperIterDataPipe(IterDataPipe[DataChunk]):
    r"""
    Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``.

    (functional name: ``groupby``).

    The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group
    will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full,
    the DataPipe will yield the largest batch with the same key, provided that its size is at least
    ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``;
    otherwise a ``RuntimeError`` is raised.

    After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity
    will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``.

    Args:
        datapipe: Iterable datapipe to be grouped
        group_key_fn: Function used to generate group key from the data of the source datapipe
        keep_key: Option to yield the matching key along with the items in a tuple,
            resulting in ``(key, [items])``; otherwise returning ``[items]``
        buffer_size: The size of buffer for ungrouped data
        group_size: The max size of each group, a batch is yielded as soon as it reaches this size
        guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full
        drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer
            when the buffer is full

    Example:
        >>> import os
        >>> # xdoctest: +SKIP
        >>> from torchdata.datapipes.iter import IterableWrapper
        >>> def group_fn(file):
        ...     return os.path.basename(file).split(".")[0]
        >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
        >>> dp0 = source_dp.groupby(group_key_fn=group_fn)
        >>> list(dp0)
        [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']]
        >>> # A group is yielded as soon as its size equals `group_size`
        >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2)
        >>> list(dp1)
        [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
        >>> # Scenario where the buffer is full, and group 'a' is yielded since its size reaches `guaranteed_group_size`
        >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2)
        >>> list(dp2)
        [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
    """

    def __init__(self,
                 datapipe: IterDataPipe[T_co],
                 group_key_fn: Callable[[T_co], Any],
                 *,
                 keep_key: bool = False,
                 buffer_size: int = 10000,
                 group_size: Optional[int] = None,
                 guaranteed_group_size: Optional[int] = None,
                 drop_remaining: bool = False):
        _check_unpickable_fn(group_key_fn)
        self.datapipe = datapipe
        self.group_key_fn = group_key_fn

        self.keep_key = keep_key
        self.max_buffer_size = buffer_size
        self.buffer_elements: DefaultDict[Any, List] = defaultdict(list)
        self.curr_buffer_size = 0
        self.group_size = group_size
        self.guaranteed_group_size = None
        if group_size is not None and buffer_size is not None:
            assert 0 < group_size <= buffer_size
            self.guaranteed_group_size = group_size
        if guaranteed_group_size is not None:
            assert group_size is not None and 0 < guaranteed_group_size <= group_size
            self.guaranteed_group_size = guaranteed_group_size
        self.drop_remaining = drop_remaining
        self.wrapper_class = DataChunk

    def _remove_biggest_key(self):
        biggest_key = None
        biggest_size = 0
        result_to_yield = None
        for findkey in self.buffer_elements.keys():
            if len(self.buffer_elements[findkey]) > biggest_size:
                biggest_size = len(self.buffer_elements[findkey])
                biggest_key = findkey

        if self.guaranteed_group_size is not None and biggest_size < self.guaranteed_group_size and not self.drop_remaining:
            raise RuntimeError('Failed to group items', str(self.buffer_elements[biggest_key]))

        if self.guaranteed_group_size is None or biggest_size >= self.guaranteed_group_size:
            result_to_yield = self.buffer_elements[biggest_key]

        self.curr_buffer_size -= biggest_size
        del self.buffer_elements[biggest_key]

        return result_to_yield

    def __iter__(self):
        for x in self.datapipe:
            key = self.group_key_fn(x)

            self.buffer_elements[key].append(x)
            self.curr_buffer_size += 1

            if self.group_size is not None and self.group_size == len(self.buffer_elements[key]):
                result: DataChunk[Any] = self.wrapper_class(self.buffer_elements[key])
                yield (key, result) if self.keep_key else result
                self.curr_buffer_size -= len(self.buffer_elements[key])
                del self.buffer_elements[key]

            if self.curr_buffer_size == self.max_buffer_size:
                result_to_yield = self._remove_biggest_key()
                if result_to_yield is not None:
                    result = self.wrapper_class(result_to_yield)
                    yield (key, result) if self.keep_key else result

        for key in tuple(self.buffer_elements.keys()):
            result = self.wrapper_class(self.buffer_elements.pop(key))
            self.curr_buffer_size -= len(result)
            yield (key, result) if self.keep_key else result

    def reset(self) -> None:
        self.curr_buffer_size = 0
        self.buffer_elements = defaultdict(list)

    def __getstate__(self):
        state = (
            self.datapipe,
            self.group_key_fn,
            self.keep_key,
            self.max_buffer_size,
            self.group_size,
            self.guaranteed_group_size,
            self.drop_remaining,
            self.wrapper_class,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        )
        if IterDataPipe.getstate_hook is not None:
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (
            self.datapipe,
            self.group_key_fn,
            self.keep_key,
            self.max_buffer_size,
            self.group_size,
            self.guaranteed_group_size,
            self.drop_remaining,
            self.wrapper_class,
            self._valid_iterator_id,
            self._number_of_samples_yielded,
        ) = state
        self.curr_buffer_size = 0
        self.buffer_elements = defaultdict(list)

    def __del__(self):
        self.buffer_elements.clear()
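Tracing the buffer logic above on the docstring's ``dp2`` scenario makes the eviction order concrete; a sketch using the in-tree ``IterableWrapper`` and the same ``group_fn``:

import os
from torch.utils.data.datapipes.iter import IterableWrapper

def group_fn(file):
    return os.path.basename(file).split(".")[0]

source = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
# After "a.png", "b.png", "a.json" the buffer holds 3 items (== buffer_size),
# so _remove_biggest_key evicts group 'a' (size 2 >= guaranteed_group_size).
dp = source.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3,
                    guaranteed_group_size=2)
assert list(dp) == [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
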
venv/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py
ADDED
@@ -0,0 +1,66 @@
from io import BufferedIOBase
from typing import Any, Callable, Iterable, Iterator, Sized, Tuple

from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import _deprecation_warning
from torch.utils.data.datapipes.utils.decoder import (
    Decoder,
    basichandlers as decoder_basichandlers,
    imagehandler as decoder_imagehandler,
    extension_extract_fn
)

__all__ = ["RoutedDecoderIterDataPipe", ]


@functional_datapipe('routed_decode')
class RoutedDecoderIterDataPipe(IterDataPipe[Tuple[str, Any]]):
    r"""
    Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple.

    (functional name: ``routed_decode``)

    Args:
        datapipe: Iterable datapipe that provides pathname and binary stream in tuples
        handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder
            handlers will be set as default. If multiple handlers are provided, the priority
            order follows the order of handlers (the first handler has the top priority)
        key_fn: Function for decoder to extract key from pathname to dispatch handlers.
            Default is set to extract file extension from pathname

    Note:
        When ``key_fn`` is specified and returns anything other than an extension, the default
        handlers will not work and users need to specify custom handlers. A custom handler
        could use regex to determine whether it is eligible to handle the data.
    """

    def __init__(self,
                 datapipe: Iterable[Tuple[str, BufferedIOBase]],
                 *handlers: Callable,
                 key_fn: Callable = extension_extract_fn) -> None:
        super().__init__()
        self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
        if not handlers:
            handlers = (decoder_basichandlers, decoder_imagehandler('torch'))
        self.decoder = Decoder(*handlers, key_fn=key_fn)
        _deprecation_warning(
            type(self).__name__,
            deprecation_version="1.12",
            removal_version="1.13",
            old_functional_name="routed_decode",
        )

    def add_handler(self, *handler: Callable) -> None:
        self.decoder.add_handler(*handler)

    def __iter__(self) -> Iterator[Tuple[str, Any]]:
        for data in self.datapipe:
            pathname = data[0]
            result = self.decoder(data)
            yield (pathname, result[pathname])

    def __len__(self) -> int:
        if isinstance(self.datapipe, Sized):
            return len(self.datapipe)
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
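Although the class is deprecated (see the ``_deprecation_warning`` call above), the handler protocol it dispatches to is worth a sketch: a handler receives the key extracted by ``key_fn`` (the file extension by default) plus the data, and returns ``None`` to decline so the next handler is tried. The ``txt_handler`` below is hypothetical, not part of the library:

import io
from torch.utils.data.datapipes.iter import IterableWrapper

def txt_handler(extension, data):
    # Hypothetical handler: decode .txt payloads, decline everything else.
    if extension != "txt":
        return None
    if isinstance(data, bytes):          # Decoder may pre-read stream handles into bytes
        return data.decode("utf-8")
    return data.read().decode("utf-8")

streams = IterableWrapper([("notes.txt", io.BytesIO(b"hello"))])
dp = streams.routed_decode(txt_handler)  # emits the deprecation warning
assert list(dp) == [("notes.txt", "hello")]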