applied-ai-018 committed
Commit fb047ca · verified · 1 Parent(s): a937d03

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so +3 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/utils/_config_module.py +369 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/utils/_contextlib.py +152 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py +58 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/utils/_cuda_trace.py +99 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py +970 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/utils/_import_utils.py +42 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/utils/_stats.py +21 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/utils/_traceback.py +254 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/utils/backend_registration.py +339 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/utils/checkpoint.py +1439 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/utils/collect_env.py +624 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py +11 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_extension.py +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__init__.py +76 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py +5 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataloader.py +1479 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py +3 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py +184 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py +248 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py +430 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py +11 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py +433 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py +131 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py +18 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py +404 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi +689 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py +246 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataset.py +488 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/utils/data/distributed.py +137 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph.py +149 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph_settings.py +160 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/utils/data/sampler.py +305 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/utils/dlpack.py +121 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/utils/file_baton.py +49 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/utils/flop_counter.py +559 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/utils/hooks.py +252 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/utils/mkldnn.py +233 -0
.gitattributes CHANGED
@@ -198,3 +198,4 @@ llmeval-env/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64
198
  llmeval-env/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
199
  llmeval-env/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
200
  llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
201
+ llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6345c716b5d67adb2d3f2477c07c0b1a214a70aa7cb71101d99327aba0bfaa0
3
+ size 4438576
llmeval-env/lib/python3.10/site-packages/torch/utils/_config_module.py ADDED
@@ -0,0 +1,369 @@
1
+ import contextlib
2
+
3
+ import copy
4
+ import hashlib
5
+ import inspect
6
+ import io
7
+ import pickle
8
+ import tokenize
9
+ import unittest
10
+ import warnings
11
+ from types import FunctionType, ModuleType
12
+ from typing import Any, Dict, Optional, Set, Union
13
+ from unittest import mock
14
+
15
+ # Types saved/loaded in configs
16
+ CONFIG_TYPES = (int, float, bool, type(None), str, list, set, tuple, dict)
17
+
18
+
19
+ def install_config_module(module):
20
+ """
21
+ Converts a module-level config into a `ConfigModule()`.
22
+
23
+ See _config_typing.pyi for instructions on how to get the converted module to typecheck.
24
+ """
25
+
26
+ class ConfigModuleInstance(ConfigModule):
27
+ _bypass_keys = set({"_is_dirty", "_hash_digest"})
28
+
29
+ def visit(source, dest, prefix):
30
+ """Walk the module structure and move everything to module._config"""
31
+ for key, value in list(source.__dict__.items()):
32
+ if (
33
+ key.startswith("__")
34
+ or isinstance(value, (ModuleType, FunctionType))
35
+ or (hasattr(value, "__module__") and value.__module__ == "typing")
36
+ ):
37
+ continue
38
+
39
+ name = f"{prefix}{key}"
40
+ if isinstance(value, CONFIG_TYPES):
41
+ config[name] = value
42
+ default[name] = value
43
+ if dest is module:
44
+ delattr(module, key)
45
+ elif isinstance(value, type):
46
+ assert value.__module__ == module.__name__
47
+ # a subconfig with `class Blah:` syntax
48
+ proxy = SubConfigProxy(module, f"{name}.")
49
+ visit(value, proxy, f"{name}.")
50
+ setattr(dest, key, proxy)
51
+ else:
52
+ raise AssertionError(f"Unhandled config {key}={value} ({type(value)})")
53
+
54
+ config: Dict[str, Any] = dict()
55
+ default: Dict[str, Any] = dict()
56
+
57
+ compile_ignored_keys = get_assignments_with_compile_ignored_comments(module)
58
+
59
+ visit(module, module, "")
60
+ module._config = config
61
+ module._default = default
62
+ module._allowed_keys = set(config.keys())
63
+ module._compile_ignored_keys = compile_ignored_keys
64
+ module.__class__ = ConfigModuleInstance
65
+ module._is_dirty = True
66
+ module._hash_digest = None
67
+
68
+
69
+ COMPILE_IGNORED_MARKER = "@compile_ignored"
70
+
71
+
72
+ # Gets all the keys (i.e. assignments) with a @compile_ignored comment
73
+ def get_assignments_with_compile_ignored_comments(module):
74
+ source_code = inspect.getsource(module)
75
+ assignments = set()
76
+
77
+ # Tokenize the source code to retrieve comments
78
+ tokens = tokenize.tokenize(io.BytesIO(source_code.encode("utf-8")).readline)
79
+ current_comment = "", -1
80
+ prev_name = ""
81
+
82
+ for token in tokens:
83
+ if token.type == tokenize.COMMENT:
84
+ prev_name = ""
85
+ maybe_current = token.string.strip()
86
+ if COMPILE_IGNORED_MARKER in maybe_current:
87
+ assert current_comment == (
88
+ "",
89
+ -1,
90
+ ), f"unconsumed {COMPILE_IGNORED_MARKER}"
91
+ current_comment = maybe_current, token.start[0]
92
+ elif token.type == tokenize.NAME:
93
+ # Only accept the first name token, to handle if you have
94
+ # something like foo: Bar = ...
95
+ if not prev_name:
96
+ prev_name = token.string
97
+ elif token.type == tokenize.OP and token.string == "=":
98
+ # Check if the current assignment follows a comment
99
+ # with COMPILE_IGNORED_MARKER
100
+ if (
101
+ COMPILE_IGNORED_MARKER in current_comment[0]
102
+ and current_comment[1] == token.start[0] - 1
103
+ ):
104
+ assignments.add(prev_name)
105
+ current_comment = "", -1 # reset
106
+ prev_name = ""
107
+ assert current_comment == ("", -1), f"unconsumed {COMPILE_IGNORED_MARKER}"
108
+ return assignments
109
+
110
+
111
+ class ConfigModule(ModuleType):
112
+ # NOTE: This should be kept in sync with _config_typing.pyi.
113
+
114
+ # The default values of the configuration settings. This can be used to
115
+ # determine if the config has been changed or not.
116
+ _default: Dict[str, Any]
117
+ # The actual configuration settings. E.g., torch._dynamo.config.debug
118
+ # would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs
119
+ # maps as "triton.cudagraphs"
120
+ _config: Dict[str, Any]
121
+ _allowed_keys: Set[str]
122
+ _bypass_keys: Set[str]
123
+ _compile_ignored_keys: Set[str]
124
+ _is_dirty: bool
125
+ _hash_digest: Optional[bytes]
126
+
127
+ def __init__(self):
128
+ raise NotImplementedError(
129
+ f"use {__name__}.install_config_module(sys.modules[__name__])"
130
+ )
131
+
132
+ def __setattr__(self, name, value):
133
+ if name in self._bypass_keys:
134
+ super().__setattr__(name, value)
135
+ elif name not in self._allowed_keys:
136
+ raise AttributeError(f"{self.__name__}.{name} does not exist")
137
+ else:
138
+ self._config[name] = value
139
+
140
+ def __getattr__(self, name):
141
+ try:
142
+ return self._config[name]
143
+ except KeyError as e:
144
+ # make hasattr() work properly
145
+ raise AttributeError(f"{self.__name__}.{name} does not exist") from e
146
+
147
+ def __delattr__(self, name):
148
+ # must support delete because unittest.mock.patch deletes
149
+ # then recreate things
150
+ del self._config[name]
151
+
152
+ def save_config(self) -> bytes:
153
+ """Convert config to a pickled blob"""
154
+ config = dict(self._config)
155
+ for key in config.get("_save_config_ignore", ()):
156
+ config.pop(key)
157
+ return pickle.dumps(config, protocol=2)
158
+
159
+ def codegen_config(self) -> str:
160
+ """Convert config to Python statements that replicate current config.
161
+ This does NOT include config settings that are at default values.
162
+ """
163
+ lines = []
164
+ mod = self.__name__
165
+ for k, v in self._config.items():
166
+ if k in self._config.get("_save_config_ignore", ()):
167
+ continue
168
+ if v == self._default[k]:
169
+ continue
170
+ lines.append(f"{mod}.{k} = {v!r}")
171
+ return "\n".join(lines)
172
+
173
+ def get_hash(self) -> bytes:
174
+ """Hashes the configs that are not compile_ignored"""
175
+ if self._is_dirty or self._hash_digest is None:
176
+ dict_to_hash = {
177
+ k: v
178
+ for k, v in self._config.items()
179
+ if k not in self._compile_ignored_keys
180
+ }
181
+ string_to_hash = repr(sorted(dict_to_hash.items()))
182
+ self._hash_digest = hashlib.md5(string_to_hash.encode("utf-8")).digest()
183
+ self._is_dirty = False
184
+ return self._hash_digest
185
+
186
+ def to_dict(self) -> Dict[str, Any]:
187
+ warnings.warn(
188
+ "config.to_dict() has been deprecated. It may no longer change the underlying config."
189
+ " use config.shallow_copy_dict() or config.get_config_copy() instead",
190
+ DeprecationWarning,
191
+ )
192
+ return self.shallow_copy_dict()
193
+
194
+ def shallow_copy_dict(self) -> Dict[str, Any]:
195
+ return {**self._config}
196
+
197
+ def load_config(self, maybe_pickled_config: Union[bytes, Dict[str, Any]]) -> None:
198
+ """Restore from a prior call to save_config() or shallow_copy_dict()"""
199
+ if not isinstance(maybe_pickled_config, dict):
200
+ config = pickle.loads(maybe_pickled_config)
201
+ else:
202
+ config = maybe_pickled_config
203
+ self._config.update(config)
204
+
205
+ def get_config_copy(self) -> Dict[str, Any]:
206
+ return copy.deepcopy(self._config)
207
+
208
+ def patch(
209
+ self,
210
+ arg1: Optional[Union[str, Dict[str, Any]]] = None,
211
+ arg2: Any = None,
212
+ **kwargs,
213
+ ):
214
+ """
215
+ Decorator and/or context manager to make temporary changes to a config.
216
+
217
+ As a decorator:
218
+
219
+ @config.patch("name", val)
220
+ @config.patch(name1=val1, name2=val2)
221
+ @config.patch({"name1": val1, "name2": val2})
222
+ def foo(...):
223
+ ...
224
+
225
+ As a context manager:
226
+
227
+ with config.patch("name", val):
228
+ ...
229
+ """
230
+ changes: Dict[str, Any]
231
+ if arg1 is not None:
232
+ if arg2 is not None:
233
+ assert isinstance(arg1, str)
234
+ # patch("key", True) syntax
235
+ changes = {arg1: arg2}
236
+ else:
237
+ assert isinstance(arg1, dict)
238
+ # patch({"key": True}) syntax
239
+ changes = arg1
240
+ assert not kwargs
241
+ else:
242
+ # patch(key=True) syntax
243
+ changes = kwargs
244
+ assert arg2 is None
245
+ assert isinstance(changes, dict), f"expected `dict` got {type(changes)}"
246
+ prior: Dict[str, Any] = {}
247
+ config = self
248
+ dirty = False
249
+
250
+ class ConfigPatch(ContextDecorator):
251
+ def __enter__(self):
252
+ assert not prior
253
+ nonlocal dirty
254
+ for key in changes.keys():
255
+ # KeyError on invalid entry
256
+ prior[key] = config._config[key]
257
+ dirty = key not in config._compile_ignored_keys
258
+ config._config.update(changes)
259
+ config._is_dirty = dirty
260
+
261
+ def __exit__(self, exc_type, exc_val, exc_tb):
262
+ nonlocal dirty
263
+ config._config.update(prior)
264
+ config._is_dirty = dirty
265
+ prior.clear()
266
+
267
+ return ConfigPatch()
268
+
269
+ def _make_closure_patcher(self, **changes):
270
+ """
271
+ A lower-overhead version of patch() for things on the critical path.
272
+
273
+ Usage:
274
+
275
+ # do this off the critical path
276
+ change_fn = config.make_closure_patcher(foo=True)
277
+
278
+ ...
279
+
280
+ revert = change_fn()
281
+ try:
282
+ ...
283
+ finally:
284
+ revert()
285
+
286
+ """
287
+ config = self._config
288
+
289
+ def change():
290
+ prior = {k: config[k] for k in changes}
291
+ config.update(changes)
292
+
293
+ def revert():
294
+ config.update(prior)
295
+
296
+ return revert
297
+
298
+ return change
299
+
300
+
301
+ class ContextDecorator(contextlib.ContextDecorator):
302
+ """
303
+ Same as contextlib.ContextDecorator, but with support for
304
+ `unittest.TestCase`
305
+ """
306
+
307
+ def __enter__(self):
308
+ raise NotImplementedError("NYI")
309
+
310
+ def __exit__(self, exc_type, exc_val, exc_tb):
311
+ raise NotImplementedError("NYI")
312
+
313
+ def __call__(self, func):
314
+ if isinstance(func, type) and issubclass(func, unittest.TestCase):
315
+
316
+ class _TestCase(func): # type: ignore[valid-type, misc]
317
+ @classmethod
318
+ def setUpClass(cls):
319
+ self.__enter__()
320
+ try:
321
+ super().setUpClass()
322
+ except Exception:
323
+ self.__exit__(None, None, None)
324
+ raise
325
+
326
+ @classmethod
327
+ def tearDownClass(cls):
328
+ try:
329
+ super().tearDownClass()
330
+ finally:
331
+ self.__exit__(None, None, None)
332
+
333
+ _TestCase.__name__ = func.__name__
334
+ _TestCase.__qualname__ = func.__qualname__
335
+ _TestCase.__module__ = func.__module__
336
+
337
+ return _TestCase
338
+
339
+ return super().__call__(func)
340
+
341
+
342
+ class SubConfigProxy:
343
+ """
344
+ Shim to redirect to main config.
345
+ `config.triton.cudagraphs` maps to _config["triton.cudagraphs"]
346
+ """
347
+
348
+ def __init__(self, config, prefix):
349
+ # `super().__setattr__` to bypass custom `__setattr__`
350
+ super().__setattr__("_config", config)
351
+ super().__setattr__("_prefix", prefix)
352
+
353
+ def __setattr__(self, name, value):
354
+ return self._config.__setattr__(self._prefix + name, value)
355
+
356
+ def __getattr__(self, name):
357
+ return self._config.__getattr__(self._prefix + name)
358
+
359
+ def __delattr__(self, name):
360
+ return self._config.__delattr__(self._prefix + name)
361
+
362
+
363
+ def patch_object(obj, name, value):
364
+ """
365
+ Workaround `mock.patch.object` issue with ConfigModule
366
+ """
367
+ if isinstance(obj, ConfigModule):
368
+ return obj.patch(name, value)
369
+ return mock.patch.object(obj, name, value)
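The module above is consumed by calling install_config_module(sys.modules[__name__]) at the bottom of a config module, which is the pattern torch._dynamo.config and torch._inductor.config follow. A minimal sketch of that pattern, not part of this commit; `demo_config`, `debug`, and `max_iters` are hypothetical names invented for the example:

```python
# demo_config.py -- hypothetical config module (illustrative sketch only)
import sys

from torch.utils._config_module import install_config_module

debug = False      # plain module-level values become config entries
max_iters = 10

# Must be the last statement: rebinds this module to a ConfigModule subclass,
# moving the values above into module._config / module._default.
install_config_module(sys.modules[__name__])
```

```python
# elsewhere -- using the converted module (illustrative sketch only)
import demo_config

with demo_config.patch(debug=True):      # temporary override, context-manager form
    assert demo_config.debug is True
assert demo_config.debug is False        # restored on __exit__

blob = demo_config.save_config()         # pickled snapshot of current values
demo_config.load_config(blob)            # restore it later
```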
llmeval-env/lib/python3.10/site-packages/torch/utils/_contextlib.py ADDED
@@ -0,0 +1,152 @@
1
+ # Extra utilities for working with context managers that should have been
2
+ # in the standard library but are not
3
+
4
+ import functools
5
+ import inspect
6
+ import warnings
7
+ import sys
8
+ from typing import Any, Callable, TypeVar, cast
9
+
10
+ # Used for annotating the decorator usage of _DecoratorContextManager (e.g.,
11
+ # 'no_grad' and 'enable_grad').
12
+ # See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators
13
+ FuncType = Callable[..., Any]
14
+ F = TypeVar('F', bound=FuncType)
15
+
16
+
17
+ def _wrap_generator(ctx_factory, func):
18
+ """
19
+ Wrap each generator invocation with the context manager factory.
20
+
21
+ The input should be a function that returns a context manager,
22
+ not a context manager itself, to handle one-shot context managers.
23
+ """
24
+ @functools.wraps(func)
25
+ def generator_context(*args, **kwargs):
26
+ gen = func(*args, **kwargs)
27
+
28
+ # Generators are suspended and unsuspended at `yield`, hence we
29
+ # make sure the grad mode is properly set every time the execution
30
+ # flow returns into the wrapped generator and restored when it
31
+ # returns through our `yield` to our caller (see PR #49017).
32
+ try:
33
+ # Issuing `None` to a generator fires it up
34
+ with ctx_factory():
35
+ response = gen.send(None)
36
+
37
+ while True:
38
+ try:
39
+ # Forward the response to our caller and get its next request
40
+ request = yield response
41
+
42
+ except GeneratorExit:
43
+ # Inform the still active generator about its imminent closure
44
+ with ctx_factory():
45
+ gen.close()
46
+ raise
47
+
48
+ except BaseException:
49
+ # Propagate the exception thrown at us by the caller
50
+ with ctx_factory():
51
+ response = gen.throw(*sys.exc_info())
52
+
53
+ else:
54
+ # Pass the last request to the generator and get its response
55
+ with ctx_factory():
56
+ response = gen.send(request)
57
+
58
+ # We let the exceptions raised above by the generator's `.throw` or
59
+ # `.send` methods bubble up to our caller, except for StopIteration
60
+ except StopIteration as e:
61
+ # The generator informed us that it is done: take whatever its
62
+ # returned value (if any) was and indicate that we're done too
63
+ # by returning it (see docs for python's return-statement).
64
+ return e.value
65
+
66
+ return generator_context
67
+
68
+
69
+ def context_decorator(ctx, func):
70
+ """
71
+ Like contextlib.ContextDecorator.
72
+
73
+ But with the following differences:
74
+ 1. Is done by wrapping, rather than inheritance, so it works with context
75
+ managers that are implemented from C and thus cannot easily inherit from
76
+ Python classes
77
+ 2. Wraps generators in the intuitive way (c.f. https://bugs.python.org/issue37743)
78
+ 3. Errors out if you try to wrap a class, because it is ambiguous whether
79
+ or not you intended to wrap only the constructor
80
+
81
+ The input argument can either be a context manager (in which case it must
82
+ be a multi-shot context manager that can be directly invoked multiple times)
83
+ or a callable that produces a context manager.
84
+ """
85
+ assert not (callable(ctx) and hasattr(ctx, '__enter__')), (
86
+ f"Passed in {ctx} is both callable and also a valid context manager "
87
+ "(has __enter__), making it ambiguous which interface to use. If you "
88
+ "intended to pass a context manager factory, rewrite your call as "
89
+ "context_decorator(lambda: ctx()); if you intended to pass a context "
90
+ "manager directly, rewrite your call as context_decorator(lambda: ctx)"
91
+ )
92
+
93
+ if not callable(ctx):
94
+ def ctx_factory():
95
+ return ctx
96
+ else:
97
+ ctx_factory = ctx
98
+
99
+ if inspect.isclass(func):
100
+ raise RuntimeError(
101
+ "Cannot decorate classes; it is ambiguous whether or not only the "
102
+ "constructor or all methods should have the context manager applied; "
103
+ "additionally, decorating a class at definition-site will prevent "
104
+ "use of the identifier as a conventional type. "
105
+ "To specify which methods to decorate, decorate each of them "
106
+ "individually."
107
+ )
108
+
109
+ if inspect.isgeneratorfunction(func):
110
+ return _wrap_generator(ctx_factory, func)
111
+
112
+ @functools.wraps(func)
113
+ def decorate_context(*args, **kwargs):
114
+ with ctx_factory():
115
+ return func(*args, **kwargs)
116
+
117
+ return decorate_context
118
+
119
+
120
+ class _DecoratorContextManager:
121
+ """Allow a context manager to be used as a decorator."""
122
+
123
+ def __call__(self, orig_func: F) -> F:
124
+ if inspect.isclass(orig_func):
125
+ warnings.warn("Decorating classes is deprecated and will be disabled in "
126
+ "future versions. You should only decorate functions or methods. "
127
+ "To preserve the current behavior of class decoration, you can "
128
+ "directly decorate the `__init__` method and nothing else.")
129
+ func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs))
130
+ else:
131
+ func = orig_func
132
+
133
+ return cast(F, context_decorator(self.clone, func))
134
+
135
+ def __enter__(self) -> None:
136
+ raise NotImplementedError
137
+
138
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
139
+ raise NotImplementedError
140
+
141
+ def clone(self):
142
+ # override this method if your children class takes __init__ parameters
143
+ return self.__class__()
144
+
145
+
146
+ class _NoParamDecoratorContextManager(_DecoratorContextManager):
147
+ """Allow a context manager to be used as a decorator without parentheses."""
148
+
149
+ def __new__(cls, orig_func=None):
150
+ if orig_func is None:
151
+ return super().__new__(cls)
152
+ return cls()(orig_func)
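A short sketch of context_decorator from the file above, not part of this commit; `timed` and `work` are made-up names. Passing a factory (a callable that returns a context manager) matters because a fresh context manager is created for every wrapped call:

```python
# Illustrative sketch only -- not part of the diff.
import contextlib
import time

from torch.utils._contextlib import context_decorator


@contextlib.contextmanager
def timed():
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"elapsed: {time.perf_counter() - start:.6f}s")


def work(n):
    return sum(range(n))


# `timed` is the factory, so each call to timed_work runs inside its own
# `with timed():` block; generator functions would be handled by _wrap_generator.
timed_work = context_decorator(timed, work)
print(timed_work(100_000))
```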
llmeval-env/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py ADDED
@@ -0,0 +1,58 @@
1
+ import collections
2
+
3
+
4
+ Entry = collections.namedtuple('Entry', 'version, hash')
5
+
6
+
7
+ def update_hash(seed, value):
8
+ # Good old boost::hash_combine
9
+ # https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html
10
+ return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2))
11
+
12
+
13
+ def hash_source_files(hash_value, source_files):
14
+ for filename in source_files:
15
+ with open(filename) as file:
16
+ hash_value = update_hash(hash_value, file.read())
17
+ return hash_value
18
+
19
+
20
+ def hash_build_arguments(hash_value, build_arguments):
21
+ for group in build_arguments:
22
+ if group:
23
+ for argument in group:
24
+ hash_value = update_hash(hash_value, argument)
25
+ return hash_value
26
+
27
+
28
+ class ExtensionVersioner:
29
+ def __init__(self):
30
+ self.entries = {}
31
+
32
+ def get_version(self, name):
33
+ entry = self.entries.get(name)
34
+ return None if entry is None else entry.version
35
+
36
+ def bump_version_if_changed(self,
37
+ name,
38
+ source_files,
39
+ build_arguments,
40
+ build_directory,
41
+ with_cuda,
42
+ is_python_module,
43
+ is_standalone):
44
+ hash_value = 0
45
+ hash_value = hash_source_files(hash_value, source_files)
46
+ hash_value = hash_build_arguments(hash_value, build_arguments)
47
+ hash_value = update_hash(hash_value, build_directory)
48
+ hash_value = update_hash(hash_value, with_cuda)
49
+ hash_value = update_hash(hash_value, is_python_module)
50
+ hash_value = update_hash(hash_value, is_standalone)
51
+
52
+ entry = self.entries.get(name)
53
+ if entry is None:
54
+ self.entries[name] = entry = Entry(0, hash_value)
55
+ elif hash_value != entry.hash:
56
+ self.entries[name] = entry = Entry(entry.version + 1, hash_value)
57
+
58
+ return entry.version
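A quick sketch of how ExtensionVersioner above decides whether to bump a version, not part of this commit; the extension name, source file, and flags are invented for the example:

```python
# Illustrative sketch only -- not part of the diff.
import tempfile

from torch.utils._cpp_extension_versioner import ExtensionVersioner

versioner = ExtensionVersioner()

with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as f:
    f.write("int add(int a, int b) { return a + b; }\n")
    source = f.name

common = dict(
    build_directory="/tmp/demo_build",
    with_cuda=False,
    is_python_module=True,
    is_standalone=False,
)

v0 = versioner.bump_version_if_changed("demo_ext", [source], [["-O2"]], **common)
v1 = versioner.bump_version_if_changed("demo_ext", [source], [["-O2"]], **common)
v2 = versioner.bump_version_if_changed("demo_ext", [source], [["-O3"]], **common)
print(v0, v1, v2)  # 0 0 1 -- identical inputs keep the version, a new flag bumps it
```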
llmeval-env/lib/python3.10/site-packages/torch/utils/_cuda_trace.py ADDED
@@ -0,0 +1,99 @@
1
+ import logging
2
+ from typing import Callable, Generic, List
3
+
4
+ from typing_extensions import ParamSpec # Python 3.10+
5
+
6
+ logger = logging.getLogger(__name__)
7
+ P = ParamSpec("P")
8
+
9
+
10
+ class CallbackRegistry(Generic[P]):
11
+ def __init__(self, name: str):
12
+ self.name = name
13
+ self.callback_list: List[Callable[P, None]] = []
14
+
15
+ def add_callback(self, cb: Callable[P, None]) -> None:
16
+ self.callback_list.append(cb)
17
+
18
+ def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None:
19
+ for cb in self.callback_list:
20
+ try:
21
+ cb(*args, **kwargs)
22
+ except Exception as e:
23
+ logger.exception(
24
+ "Exception in callback for %s registered with CUDA trace", self.name
25
+ )
26
+
27
+
28
+ CUDAEventCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
29
+ "CUDA event creation"
30
+ )
31
+ CUDAEventDeletionCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
32
+ "CUDA event deletion"
33
+ )
34
+ CUDAEventRecordCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry(
35
+ "CUDA event record"
36
+ )
37
+ CUDAEventWaitCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry(
38
+ "CUDA event wait"
39
+ )
40
+ CUDAMemoryAllocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
41
+ "CUDA memory allocation"
42
+ )
43
+ CUDAMemoryDeallocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
44
+ "CUDA memory deallocation"
45
+ )
46
+ CUDAStreamCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
47
+ "CUDA stream creation"
48
+ )
49
+ CUDADeviceSynchronizationCallbacks: "CallbackRegistry[[]]" = CallbackRegistry(
50
+ "CUDA device synchronization"
51
+ )
52
+ CUDAStreamSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
53
+ "CUDA stream synchronization"
54
+ )
55
+ CUDAEventSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry(
56
+ "CUDA event synchronization"
57
+ )
58
+
59
+
60
+ def register_callback_for_cuda_event_creation(cb: Callable[[int], None]) -> None:
61
+ CUDAEventCreationCallbacks.add_callback(cb)
62
+
63
+
64
+ def register_callback_for_cuda_event_deletion(cb: Callable[[int], None]) -> None:
65
+ CUDAEventDeletionCallbacks.add_callback(cb)
66
+
67
+
68
+ def register_callback_for_cuda_event_record(cb: Callable[[int, int], None]) -> None:
69
+ CUDAEventRecordCallbacks.add_callback(cb)
70
+
71
+
72
+ def register_callback_for_cuda_event_wait(cb: Callable[[int, int], None]) -> None:
73
+ CUDAEventWaitCallbacks.add_callback(cb)
74
+
75
+
76
+ def register_callback_for_cuda_memory_allocation(cb: Callable[[int], None]) -> None:
77
+ CUDAMemoryAllocationCallbacks.add_callback(cb)
78
+
79
+
80
+ def register_callback_for_cuda_memory_deallocation(cb: Callable[[int], None]) -> None:
81
+ CUDAMemoryDeallocationCallbacks.add_callback(cb)
82
+
83
+
84
+ def register_callback_for_cuda_stream_creation(cb: Callable[[int], None]) -> None:
85
+ CUDAStreamCreationCallbacks.add_callback(cb)
86
+
87
+
88
+ def register_callback_for_cuda_device_synchronization(cb: Callable[[], None]) -> None:
89
+ CUDADeviceSynchronizationCallbacks.add_callback(cb)
90
+
91
+
92
+ def register_callback_for_cuda_stream_synchronization(
93
+ cb: Callable[[int], None]
94
+ ) -> None:
95
+ CUDAStreamSynchronizationCallbacks.add_callback(cb)
96
+
97
+
98
+ def register_callback_for_cuda_event_synchronization(cb: Callable[[int], None]) -> None:
99
+ CUDAEventSynchronizationCallbacks.add_callback(cb)
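The registries above are plain fan-out lists; as I understand it, PyTorch's C++ CUDA hooks (used by tools such as torch.cuda._sanitizer) call fire_callbacks when the corresponding events occur. A small sketch of the registration API, not part of this commit; the callback is made up:

```python
# Illustrative sketch only -- not part of the diff.
from torch.utils import _cuda_trace


def on_event_record(event_id: int, stream_id: int) -> None:
    print(f"event {event_id:#x} recorded on stream {stream_id:#x}")


_cuda_trace.register_callback_for_cuda_event_record(on_event_record)

# The registry can also be exercised directly; note that exceptions raised by
# a callback are logged by fire_callbacks rather than propagated.
_cuda_trace.CUDAEventRecordCallbacks.fire_callbacks(0x1, 0x2)
```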
llmeval-env/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py ADDED
@@ -0,0 +1,970 @@
1
+ """
2
+ Contains utility functions for working with nested python data structures.
3
+
4
+ A *pytree* is Python nested data structure. It is a tree in the sense that
5
+ nodes are Python collections (e.g., list, tuple, dict) and the leaves are
6
+ Python values. Furthermore, a pytree should not contain reference cycles.
7
+
8
+ pytrees are useful for working with nested collections of Tensors. For example,
9
+ one can use `tree_map` to map a function over all Tensors inside some nested
10
+ collection of Tensors and `tree_leaves` to get a flat list of all Tensors
11
+ inside some nested collection. pytrees are helpful for implementing nested
12
+ collection support for PyTorch APIs.
13
+ """
14
+
15
+ import functools
16
+ import sys
17
+ import types
18
+ import warnings
19
+ from typing import (
20
+ Any,
21
+ Callable,
22
+ Iterable,
23
+ List,
24
+ Optional,
25
+ overload,
26
+ Tuple,
27
+ Type,
28
+ TypeVar,
29
+ Union,
30
+ )
31
+
32
+ import torch
33
+
34
+ if torch._running_with_deploy(): # type: ignore[no-untyped-call]
35
+ raise ImportError("C++ pytree utilities do not work with torch::deploy.")
36
+
37
+ import optree
38
+ from optree import PyTreeSpec # direct import for type annotations
39
+
40
+ from torch.utils._pytree import KeyEntry
41
+
42
+
43
+ __all__ = [
44
+ "PyTree",
45
+ "Context",
46
+ "FlattenFunc",
47
+ "UnflattenFunc",
48
+ "DumpableContext",
49
+ "ToDumpableContextFn",
50
+ "FromDumpableContextFn",
51
+ "TreeSpec",
52
+ "LeafSpec",
53
+ "keystr",
54
+ "key_get",
55
+ "register_pytree_node",
56
+ "tree_flatten",
57
+ "tree_flatten_with_path",
58
+ "tree_unflatten",
59
+ "tree_leaves",
60
+ "tree_leaves_with_path",
61
+ "tree_structure",
62
+ "tree_map",
63
+ "tree_map_with_path",
64
+ "tree_map_",
65
+ "tree_map_only",
66
+ "tree_map_only_",
67
+ "tree_all",
68
+ "tree_any",
69
+ "tree_all_only",
70
+ "tree_any_only",
71
+ "treespec_dumps",
72
+ "treespec_loads",
73
+ "treespec_pprint",
74
+ ]
75
+
76
+
77
+ T = TypeVar("T")
78
+ S = TypeVar("S")
79
+ U = TypeVar("U")
80
+ R = TypeVar("R")
81
+
82
+
83
+ Context = Any
84
+ PyTree = Any
85
+ TreeSpec = PyTreeSpec
86
+ FlattenFunc = Callable[[PyTree], Tuple[List[Any], Context]]
87
+ UnflattenFunc = Callable[[Iterable[Any], Context], PyTree]
88
+ OpTreeUnflattenFunc = Callable[[Context, Iterable[Any]], PyTree]
89
+ DumpableContext = Any # Any json dumpable text
90
+ ToDumpableContextFn = Callable[[Context], DumpableContext]
91
+ FromDumpableContextFn = Callable[[DumpableContext], Context]
92
+ KeyPath = Tuple[KeyEntry, ...]
93
+ FlattenWithKeysFunc = Callable[[PyTree], Tuple[List[Tuple[KeyEntry, Any]], Any]]
94
+
95
+
96
+ def _reverse_args(func: UnflattenFunc) -> OpTreeUnflattenFunc:
97
+ @functools.wraps(func)
98
+ def wrapped(*args: Any, **kwargs: Any) -> Any:
99
+ return func(*reversed(args), **kwargs)
100
+
101
+ return wrapped
102
+
103
+
104
+ def register_pytree_node(
105
+ cls: Type[Any],
106
+ flatten_fn: FlattenFunc,
107
+ unflatten_fn: UnflattenFunc,
108
+ *,
109
+ serialized_type_name: Optional[str] = None,
110
+ to_dumpable_context: Optional[ToDumpableContextFn] = None,
111
+ from_dumpable_context: Optional[FromDumpableContextFn] = None,
112
+ flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None,
113
+ ) -> None:
114
+ """Register a container-like type as pytree node.
115
+
116
+ Args:
117
+ cls (type): A Python type to treat as an internal pytree node.
118
+ flatten_fn (callable): A function to be used during flattening, taking an instance of
119
+ ``cls`` and returning a pair, with (1) an iterable for the children to be flattened
120
+ recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be
121
+ passed to the ``unflatten_fn``.
122
+ unflatten_fn (callable): A function taking two arguments: the auxiliary data that was
123
+ returned by ``flatten_fn`` and stored in the treespec, and the unflattened children.
124
+ The function should return an instance of ``cls``.
125
+ serialized_type_name (str, optional): A keyword argument used to specify the fully
126
+ qualified name used when serializing the tree spec.
127
+ to_dumpable_context (callable, optional): An optional keyword argument to custom specify how
128
+ to convert the context of the pytree to a custom json dumpable representation. This is
129
+ used for json serialization, which is being used in :mod:`torch.export` right now.
130
+ from_dumpable_context (callable, optional): An optional keyword argument to custom specify
131
+ how to convert the custom json dumpable representation of the context back to the
132
+ original context. This is used for json deserialization, which is being used in
133
+ :mod:`torch.export` right now.
134
+
135
+ Example::
136
+
137
+ >>> # xdoctest: +SKIP
138
+ >>> # Registry a Python type with lambda functions
139
+ >>> register_pytree_node(
140
+ ... set,
141
+ ... lambda s: (sorted(s), None, None),
142
+ ... lambda children, _: set(children),
143
+ ... )
144
+ """
145
+ if flatten_with_keys_fn is not None:
146
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
147
+
148
+ _private_register_pytree_node(
149
+ cls,
150
+ flatten_fn,
151
+ unflatten_fn,
152
+ serialized_type_name=serialized_type_name,
153
+ to_dumpable_context=to_dumpable_context,
154
+ from_dumpable_context=from_dumpable_context,
155
+ )
156
+
157
+ from . import _pytree as python
158
+
159
+ python._private_register_pytree_node(
160
+ cls,
161
+ flatten_fn,
162
+ unflatten_fn,
163
+ serialized_type_name=serialized_type_name,
164
+ to_dumpable_context=to_dumpable_context,
165
+ from_dumpable_context=from_dumpable_context,
166
+ )
167
+
168
+
169
+ def _register_pytree_node(
170
+ cls: Type[Any],
171
+ flatten_fn: FlattenFunc,
172
+ unflatten_fn: UnflattenFunc,
173
+ *,
174
+ serialized_type_name: Optional[str] = None,
175
+ to_dumpable_context: Optional[ToDumpableContextFn] = None,
176
+ from_dumpable_context: Optional[FromDumpableContextFn] = None,
177
+ ) -> None:
178
+ """Register a container-like type as pytree node for the C++ pytree only.
179
+
180
+ The ``namespace`` argument is used to avoid collisions that occur when different libraries
181
+ register the same Python type with different behaviors. It is recommended to add a unique prefix
182
+ to the namespace to avoid conflicts with other libraries. Namespaces can also be used to specify
183
+ the same class in different namespaces for different use cases.
184
+
185
+ .. warning::
186
+ For safety reasons, a ``namespace`` must be specified while registering a custom type. It is
187
+ used to isolate the behavior of flattening and unflattening a pytree node type. This is to
188
+ prevent accidental collisions between different libraries that may register the same type.
189
+
190
+ Args:
191
+ cls (type): A Python type to treat as an internal pytree node.
192
+ flatten_fn (callable): A function to be used during flattening, taking an instance of
193
+ ``cls`` and returning a pair, with (1) an iterable for the children to be flattened
194
+ recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be
195
+ passed to the ``unflatten_fn``.
196
+ unflatten_fn (callable): A function taking two arguments: the auxiliary data that was
197
+ returned by ``flatten_fn`` and stored in the treespec, and the unflattened children.
198
+ The function should return an instance of ``cls``.
199
+ serialized_type_name (str, optional): A keyword argument used to specify the fully
200
+ qualified name used when serializing the tree spec.
201
+ to_dumpable_context (callable, optional): An optional keyword argument to custom specify how
202
+ to convert the context of the pytree to a custom json dumpable representation. This is
203
+ used for json serialization, which is being used in :mod:`torch.export` right now.
204
+ from_dumpable_context (callable, optional): An optional keyword argument to custom specify
205
+ how to convert the custom json dumpable representation of the context back to the
206
+ original context. This is used for json deserialization, which is being used in
207
+ :mod:`torch.export` right now.
208
+ """
209
+ warnings.warn(
210
+ "torch.utils._cxx_pytree._register_pytree_node is deprecated. "
211
+ "Please use torch.utils._cxx_pytree.register_pytree_node instead.",
212
+ stacklevel=2,
213
+ )
214
+
215
+ _private_register_pytree_node(
216
+ cls,
217
+ flatten_fn,
218
+ unflatten_fn,
219
+ serialized_type_name=serialized_type_name,
220
+ to_dumpable_context=to_dumpable_context,
221
+ from_dumpable_context=from_dumpable_context,
222
+ )
223
+
224
+
225
+ def _private_register_pytree_node(
226
+ cls: Type[Any],
227
+ flatten_fn: FlattenFunc,
228
+ unflatten_fn: UnflattenFunc,
229
+ *,
230
+ serialized_type_name: Optional[str] = None,
231
+ to_dumpable_context: Optional[ToDumpableContextFn] = None,
232
+ from_dumpable_context: Optional[FromDumpableContextFn] = None,
233
+ ) -> None:
234
+ """This is an internal function that is used to register a pytree node type
235
+ for the C++ pytree only. End-users should use :func:`register_pytree_node`
236
+ instead.
237
+ """
238
+ # TODO(XuehaiPan): remove this condition when we make Python pytree out-of-box support
239
+ # PyStructSequence types
240
+ if not optree.is_structseq_class(cls):
241
+ optree.register_pytree_node(
242
+ cls,
243
+ flatten_fn,
244
+ _reverse_args(unflatten_fn),
245
+ namespace="torch",
246
+ )
247
+
248
+
249
+ def tree_flatten(
250
+ tree: PyTree,
251
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
252
+ ) -> Tuple[List[Any], TreeSpec]:
253
+ """Flatten a pytree.
254
+
255
+ See also :func:`tree_unflatten`.
256
+
257
+ The flattening order (i.e., the order of elements in the output list) is deterministic,
258
+ corresponding to a left-to-right depth-first tree traversal.
259
+
260
+ >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5}
261
+ >>> tree_flatten(tree)
262
+ ([1, 2, 3, 4, None, 5], PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf))
263
+ >>> tree_flatten(1)
264
+ ([1], PyTreeSpec(*, NoneIsLeaf))
265
+ >>> tree_flatten(None)
266
+ ([None], PyTreeSpec(*, NoneIsLeaf))
267
+
268
+ For unordered dictionaries, :class:`dict` and :class:`collections.defaultdict`, the order is
269
+ dependent on the **sorted** keys in the dictionary. Please use :class:`collections.OrderedDict`
270
+ if you want to keep the keys in the insertion order.
271
+
272
+ >>> from collections import OrderedDict
273
+ >>> tree = OrderedDict([('b', (2, [3, 4])), ('a', 1), ('c', None), ('d', 5)])
274
+ >>> tree_flatten(tree)
275
+ ([2, 3, 4, 1, None, 5], PyTreeSpec(OrderedDict([('b', (*, [*, *])), ('a', *), ('c', *), ('d', *)]), NoneIsLeaf))
276
+
277
+ Args:
278
+ tree (pytree): A pytree to flatten.
279
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
280
+ flattening step. The function should have a single argument with signature
281
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
282
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
283
+ leaf or not. If the function is not specified, the default pytree registry will be used.
284
+
285
+ Returns:
286
+ A pair ``(leaves, treespec)`` where the first element is a list of leaf values and the
287
+ second element is a treespec representing the structure of the pytree.
288
+ """
289
+ return optree.tree_flatten( # type: ignore[return-value]
290
+ tree,
291
+ is_leaf=is_leaf,
292
+ none_is_leaf=True,
293
+ namespace="torch",
294
+ )
295
+
296
+
297
+ def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree:
298
+ """Reconstruct a pytree from the treespec and the leaves.
299
+
300
+ The inverse of :func:`tree_flatten`.
301
+
302
+ >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5}
303
+ >>> leaves, treespec = tree_flatten(tree)
304
+ >>> tree == tree_unflatten(leaves, treespec)
305
+ True
306
+
307
+ Args:
308
+ leaves (iterable): The list of leaves to use for reconstruction. The list must match the
309
+ number of leaves of the treespec.
310
+ treespec (TreeSpec): The treespec to reconstruct.
311
+
312
+ Returns:
313
+ The reconstructed pytree, containing the ``leaves`` placed in the structure described by
314
+ ``treespec``.
315
+ """
316
+ if not isinstance(treespec, TreeSpec):
317
+ raise TypeError(
318
+ f"tree_unflatten(values, spec): Expected `spec` to be instance of "
319
+ f"TreeSpec but got item of type {type(treespec)}."
320
+ )
321
+ return optree.tree_unflatten(treespec, leaves) # type: ignore[arg-type]
322
+
323
+
324
+ def tree_leaves(
325
+ tree: PyTree,
326
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
327
+ ) -> List[Any]:
328
+ """Get the leaves of a pytree.
329
+
330
+ See also :func:`tree_flatten`.
331
+
332
+ >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5}
333
+ >>> tree_leaves(tree)
334
+ [1, 2, 3, 4, None, 5]
335
+ >>> tree_leaves(1)
336
+ [1]
337
+ >>> tree_leaves(None)
338
+ [None]
339
+
340
+ Args:
341
+ tree (pytree): A pytree to flatten.
342
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
343
+ flattening step. The function should have a single argument with signature
344
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
345
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
346
+ leaf or not. If the function is not specified, the default pytree registry will be used.
347
+
348
+ Returns:
349
+ A list of leaf values.
350
+ """
351
+ return optree.tree_leaves(
352
+ tree,
353
+ is_leaf=is_leaf,
354
+ none_is_leaf=True,
355
+ namespace="torch",
356
+ )
357
+
358
+
359
+ def tree_structure(
360
+ tree: PyTree,
361
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
362
+ ) -> TreeSpec:
363
+ """Get the treespec for a pytree.
364
+
365
+ See also :func:`tree_flatten`.
366
+
367
+ >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5}
368
+ >>> tree_structure(tree)
369
+ PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf)
370
+ >>> tree_structure(1)
371
+ PyTreeSpec(*, NoneIsLeaf)
372
+ >>> tree_structure(None)
373
+ PyTreeSpec(*, NoneIsLeaf)
374
+
375
+ Args:
376
+ tree (pytree): A pytree to flatten.
377
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
378
+ flattening step. The function should have a single argument with signature
379
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
380
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
381
+ leaf or not. If the function is not specified, the default pytree registry will be used.
382
+
383
+ Returns:
384
+ A treespec object representing the structure of the pytree.
385
+ """
386
+ return optree.tree_structure( # type: ignore[return-value]
387
+ tree,
388
+ is_leaf=is_leaf,
389
+ none_is_leaf=True,
390
+ namespace="torch",
391
+ )
392
+
393
+
394
+ def tree_map(
395
+ func: Callable[..., Any],
396
+ tree: PyTree,
397
+ *rests: PyTree,
398
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
399
+ ) -> PyTree:
400
+ """Map a multi-input function over pytree args to produce a new pytree.
401
+
402
+ See also :func:`tree_map_`.
403
+
404
+ >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)})
405
+ {'x': 8, 'y': (43, 65)}
406
+ >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None})
407
+ {'x': False, 'y': (False, False), 'z': True}
408
+
409
+ If multiple inputs are given, the structure of the tree is taken from the first input;
410
+ subsequent inputs need only have ``tree`` as a prefix:
411
+
412
+ >>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]])
413
+ [[5, 7, 9], [6, 1, 2]]
414
+
415
+ Args:
416
+ func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
417
+ corresponding leaves of the pytrees.
418
+ tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
419
+ argument to function ``func``.
420
+ rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
421
+ ``tree`` or has ``tree`` as a prefix.
422
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
423
+ flattening step. The function should have a single argument with signature
424
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
425
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
426
+ leaf or not. If the function is not specified, the default pytree registry will be used.
427
+
428
+ Returns:
429
+ A new pytree with the same structure as ``tree`` but with the value at each leaf given by
430
+ ``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs``
431
+ is the tuple of values at corresponding nodes in ``rests``.
432
+ """
433
+ return optree.tree_map(
434
+ func,
435
+ tree,
436
+ *rests,
437
+ is_leaf=is_leaf,
438
+ none_is_leaf=True,
439
+ namespace="torch",
440
+ )
441
+
442
+
443
+ def tree_map_(
444
+ func: Callable[..., Any],
445
+ tree: PyTree,
446
+ *rests: PyTree,
447
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
448
+ ) -> PyTree:
449
+ """Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree.
450
+
451
+ See also :func:`tree_map`.
452
+
453
+ Args:
454
+ func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the
455
+ corresponding leaves of the pytrees.
456
+ tree (pytree): A pytree to be mapped over, with each leaf providing the first positional
457
+ argument to function ``func``.
458
+ rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as
459
+ ``tree`` or has ``tree`` as a prefix.
460
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
461
+ flattening step. The function should have a single argument with signature
462
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
463
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
464
+ leaf or not. If the function is not specified, the default pytree registry will be used.
465
+
466
+ Returns:
467
+ The original ``tree`` with the value at each leaf is given by the side-effect of function
468
+ ``func(x, *xs)`` (not the return value) where ``x`` is the value at the corresponding leaf
469
+ in ``tree`` and ``xs`` is the tuple of values at values at corresponding nodes in ``rests``.
470
+ """
471
+ return optree.tree_map_(
472
+ func,
473
+ tree,
474
+ *rests,
475
+ is_leaf=is_leaf,
476
+ none_is_leaf=True,
477
+ namespace="torch",
478
+ )
479
+
480
+
481
+ Type2 = Tuple[Type[T], Type[S]]
482
+ Type3 = Tuple[Type[T], Type[S], Type[U]]
483
+ if sys.version_info >= (3, 10):
484
+ TypeAny = Union[Type[Any], Tuple[Type[Any], ...], types.UnionType]
485
+ else:
486
+ TypeAny = Union[Type[Any], Tuple[Type[Any], ...]]
487
+
488
+ Fn2 = Callable[[Union[T, S]], R]
489
+ Fn3 = Callable[[Union[T, S, U]], R]
490
+ Fn = Callable[[T], R]
491
+ FnAny = Callable[[Any], R]
492
+
493
+ MapOnlyFn = Callable[[T], Callable[[Any], Any]]
494
+
495
+
496
+ # These specializations help with type inference on the lambda passed to this
497
+ # function
498
+ @overload
499
+ def map_only(__type_or_types_or_pred: Type2[T, S]) -> MapOnlyFn[Fn2[T, S, Any]]:
500
+ ...
501
+
502
+
503
+ @overload
504
+ def map_only(__type_or_types_or_pred: Type3[T, S, U]) -> MapOnlyFn[Fn3[T, S, U, Any]]:
505
+ ...
506
+
507
+
508
+ @overload
509
+ def map_only(__type_or_types_or_pred: Type[T]) -> MapOnlyFn[Fn[T, Any]]:
510
+ ...
511
+
512
+
513
+ # This specialization is needed for the implementations below that call
514
+ @overload
515
+ def map_only(__type_or_types_or_pred: TypeAny) -> MapOnlyFn[FnAny[Any]]:
516
+ ...
517
+
518
+
519
+ @overload
520
+ def map_only(__type_or_types_or_pred: Callable[[Any], bool]) -> MapOnlyFn[FnAny[Any]]:
521
+ ...
522
+
523
+
524
+ def map_only(
525
+ __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]]
526
+ ) -> MapOnlyFn[FnAny[Any]]:
527
+ """
528
+ Suppose you are writing a tree_map over tensors, leaving everything
529
+ else unchanged. Ordinarily you would have to write:
530
+
531
+ def go(t):
532
+ if isinstance(t, Tensor):
533
+ return ...
534
+ else:
535
+ return t
536
+
537
+ With this function, you only need to write:
538
+
539
+ @map_only(Tensor)
540
+ def go(t):
541
+ return ...
542
+
543
+ You can also directly use 'tree_map_only'
544
+ """
545
+ if isinstance(__type_or_types_or_pred, (type, tuple)) or (
546
+ sys.version_info >= (3, 10)
547
+ and isinstance(__type_or_types_or_pred, types.UnionType)
548
+ ):
549
+
550
+ def pred(x: Any) -> bool:
551
+ return isinstance(x, __type_or_types_or_pred) # type: ignore[arg-type]
552
+
553
+ elif callable(__type_or_types_or_pred):
554
+ pred = __type_or_types_or_pred # type: ignore[assignment]
555
+ else:
556
+ raise TypeError("Argument must be a type, a tuple of types, or a callable.")
557
+
558
+ def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]:
559
+ @functools.wraps(func)
560
+ def wrapped(x: T) -> Any:
561
+ if pred(x):
562
+ return func(x)
563
+ return x
564
+
565
+ return wrapped
566
+
567
+ return wrapper
568
+
569
+
570
+ @overload
571
+ def tree_map_only(
572
+ __type_or_types_or_pred: Type[T],
573
+ func: Fn[T, Any],
574
+ tree: PyTree,
575
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
576
+ ) -> PyTree:
577
+ ...
578
+
579
+
580
+ @overload
581
+ def tree_map_only(
582
+ __type_or_types_or_pred: Type2[T, S],
583
+ func: Fn2[T, S, Any],
584
+ tree: PyTree,
585
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
586
+ ) -> PyTree:
587
+ ...
588
+
589
+
590
+ @overload
591
+ def tree_map_only(
592
+ __type_or_types_or_pred: Type3[T, S, U],
593
+ func: Fn3[T, S, U, Any],
594
+ tree: PyTree,
595
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
596
+ ) -> PyTree:
597
+ ...
598
+
599
+
600
+ @overload
601
+ def tree_map_only(
602
+ __type_or_types_or_pred: Callable[[Any], bool],
603
+ func: FnAny[Any],
604
+ tree: PyTree,
605
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
606
+ ) -> PyTree:
607
+ ...
608
+
609
+
610
+ def tree_map_only(
611
+ __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
612
+ func: FnAny[Any],
613
+ tree: PyTree,
614
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
615
+ ) -> PyTree:
616
+ return tree_map(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
617
+
618
+
619
+ @overload
620
+ def tree_map_only_(
621
+ __type_or_types_or_pred: Type[T],
622
+ func: Fn[T, Any],
623
+ tree: PyTree,
624
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
625
+ ) -> PyTree:
626
+ ...
627
+
628
+
629
+ @overload
630
+ def tree_map_only_(
631
+ __type_or_types_or_pred: Type2[T, S],
632
+ func: Fn2[T, S, Any],
633
+ tree: PyTree,
634
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
635
+ ) -> PyTree:
636
+ ...
637
+
638
+
639
+ @overload
640
+ def tree_map_only_(
641
+ __type_or_types_or_pred: Type3[T, S, U],
642
+ func: Fn3[T, S, U, Any],
643
+ tree: PyTree,
644
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
645
+ ) -> PyTree:
646
+ ...
647
+
648
+
649
+ @overload
650
+ def tree_map_only_(
651
+ __type_or_types_or_pred: Callable[[Any], bool],
652
+ func: FnAny[Any],
653
+ tree: PyTree,
654
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
655
+ ) -> PyTree:
656
+ ...
657
+
658
+
659
+ def tree_map_only_(
660
+ __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]],
661
+ func: FnAny[Any],
662
+ tree: PyTree,
663
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
664
+ ) -> PyTree:
665
+ return tree_map_(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf)
666
+
667
+
668
+ def tree_all(
669
+ pred: Callable[[Any], bool],
670
+ tree: PyTree,
671
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
672
+ ) -> bool:
673
+ flat_args = tree_leaves(tree, is_leaf=is_leaf)
674
+ return all(map(pred, flat_args))
675
+
676
+
677
+ def tree_any(
678
+ pred: Callable[[Any], bool],
679
+ tree: PyTree,
680
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
681
+ ) -> bool:
682
+ flat_args = tree_leaves(tree, is_leaf=is_leaf)
683
+ return any(map(pred, flat_args))
684
+
685
+
686
+ @overload
687
+ def tree_all_only(
688
+ __type_or_types: Type[T],
689
+ pred: Fn[T, bool],
690
+ tree: PyTree,
691
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
692
+ ) -> bool:
693
+ ...
694
+
695
+
696
+ @overload
697
+ def tree_all_only(
698
+ __type_or_types: Type2[T, S],
699
+ pred: Fn2[T, S, bool],
700
+ tree: PyTree,
701
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
702
+ ) -> bool:
703
+ ...
704
+
705
+
706
+ @overload
707
+ def tree_all_only(
708
+ __type_or_types: Type3[T, S, U],
709
+ pred: Fn3[T, S, U, bool],
710
+ tree: PyTree,
711
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
712
+ ) -> bool:
713
+ ...
714
+
715
+
716
+ def tree_all_only(
717
+ __type_or_types: TypeAny,
718
+ pred: FnAny[bool],
719
+ tree: PyTree,
720
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
721
+ ) -> bool:
722
+ flat_args = tree_leaves(tree, is_leaf=is_leaf)
723
+ return all(pred(x) for x in flat_args if isinstance(x, __type_or_types))
724
+
725
+
726
+ @overload
727
+ def tree_any_only(
728
+ __type_or_types: Type[T],
729
+ pred: Fn[T, bool],
730
+ tree: PyTree,
731
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
732
+ ) -> bool:
733
+ ...
734
+
735
+
736
+ @overload
737
+ def tree_any_only(
738
+ __type_or_types: Type2[T, S],
739
+ pred: Fn2[T, S, bool],
740
+ tree: PyTree,
741
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
742
+ ) -> bool:
743
+ ...
744
+
745
+
746
+ @overload
747
+ def tree_any_only(
748
+ __type_or_types: Type3[T, S, U],
749
+ pred: Fn3[T, S, U, bool],
750
+ tree: PyTree,
751
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
752
+ ) -> bool:
753
+ ...
754
+
755
+
756
+ def tree_any_only(
757
+ __type_or_types: TypeAny,
758
+ pred: FnAny[bool],
759
+ tree: PyTree,
760
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
761
+ ) -> bool:
762
+ flat_args = tree_leaves(tree, is_leaf=is_leaf)
763
+ return any(pred(x) for x in flat_args if isinstance(x, __type_or_types))
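The ``*_only`` variants restrict the predicate to leaves of the given type(s); leaves of other types are ignored entirely. A sketch:
tree = [1, "a", 2.5]
assert tree_all_only(int, lambda x: x > 0, tree)           # only the int leaf (1) is checked
assert not tree_any_only(str, lambda s: s == "b", tree)    # no str leaf equals "b"
assert tree_any_only((int, float), lambda x: x > 2, tree)  # 2.5 matches the tuple-of-types form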
764
+
765
+
766
+ def broadcast_prefix(
767
+ prefix_tree: PyTree,
768
+ full_tree: PyTree,
769
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
770
+ ) -> List[Any]:
771
+ """Return a list of broadcasted leaves in ``prefix_tree`` to match the number of leaves in ``full_tree``.
772
+
773
+ If a ``prefix_tree`` is a prefix of a ``full_tree``, this means the ``full_tree`` can be
774
+ constructed by replacing the leaves of ``prefix_tree`` with appropriate **subtrees**.
775
+
776
+ This function returns a list of leaves with the same size as ``full_tree``. The leaves are
777
+ replicated from ``prefix_tree``. The number of replicas is determined by the corresponding
778
+ subtree in ``full_tree``.
779
+
780
+ >>> broadcast_prefix(1, [1, 2, 3])
781
+ [1, 1, 1]
782
+ >>> broadcast_prefix([1, 2, 3], [1, 2, 3])
783
+ [1, 2, 3]
784
+ >>> broadcast_prefix([1, 2, 3], [1, 2, 3, 4])
785
+ Traceback (most recent call last):
786
+ ...
787
+ ValueError: list arity mismatch; expected: 3, got: 4; list: [1, 2, 3, 4].
788
+ >>> broadcast_prefix([1, 2, 3], [1, 2, (3, 4)])
789
+ [1, 2, 3, 3]
790
+ >>> broadcast_prefix([1, 2, 3], [1, 2, {'a': 3, 'b': 4, 'c': (None, 5)}])
791
+ [1, 2, 3, 3, 3, 3]
792
+
793
+ Args:
794
+ prefix_tree (pytree): A pytree with the same structure as a prefix of ``full_tree``.
795
+ full_tree (pytree): A pytree with the same structure as a suffix of ``prefix_tree``.
796
+ is_leaf (callable, optional): An extra leaf predicate function that will be called at each
797
+ flattening step. The function should have a single argument with signature
798
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated
799
+ as a leaf. Otherwise, the default pytree registry will be used to determine a node is a
800
+ leaf or not. If the function is not specified, the default pytree registry will be used.
801
+
802
+ Returns:
803
+ A list of leaves in ``prefix_tree`` broadcasted to match the number of leaves in ``full_tree``.
804
+ """
805
+ return optree.broadcast_prefix(
806
+ prefix_tree,
807
+ full_tree,
808
+ is_leaf=is_leaf,
809
+ none_is_leaf=True,
810
+ namespace="torch",
811
+ )
812
+
813
+
814
+ # Broadcasts a pytree to the provided TreeSpec and returns the flattened
815
+ # values. If this is not possible, then this function returns None.
816
+ #
817
+ # For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]),
818
+ # would return [0, 0]. This is useful for part of the vmap implementation:
819
+ # a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be
820
+ # broadcastable to the tree structure of `inputs` and we use
821
+ # _broadcast_to_and_flatten to check this.
822
+ def _broadcast_to_and_flatten(
823
+ tree: PyTree,
824
+ treespec: TreeSpec,
825
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
826
+ ) -> Optional[List[Any]]:
827
+ assert isinstance(treespec, TreeSpec)
828
+ full_tree = tree_unflatten([0] * treespec.num_leaves, treespec)
829
+ try:
830
+ return broadcast_prefix(tree, full_tree, is_leaf=is_leaf)
831
+ except ValueError:
832
+ return None
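A sketch of the broadcast behaviour described in the comment above (the spec is obtained via ``tree_structure``):
spec = tree_structure([0, 0])                                 # a list with two leaves
assert _broadcast_to_and_flatten(0, spec) == [0, 0]           # a single leaf broadcasts to both slots
assert _broadcast_to_and_flatten([1, 2], spec) == [1, 2]      # an exact match is returned as-is
assert _broadcast_to_and_flatten([1, 2, 3], spec) is None     # arity mismatch -> None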
833
+
834
+
835
+ def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str:
836
+ """Serialize a treespec to a JSON string."""
837
+ if not isinstance(treespec, TreeSpec):
838
+ raise TypeError(
839
+ f"treespec_dumps(spec): Expected `spec` to be instance of "
840
+ f"TreeSpec but got item of type {type(treespec)}."
841
+ )
842
+ from ._pytree import (
843
+ tree_structure as _tree_structure,
844
+ treespec_dumps as _treespec_dumps,
845
+ )
846
+
847
+ orig_treespec = _tree_structure(tree_unflatten([0] * treespec.num_leaves, treespec))
848
+ return _treespec_dumps(orig_treespec, protocol=protocol)
849
+
850
+
851
+ def treespec_loads(serialized: str) -> TreeSpec:
852
+ """Deserialize a treespec from a JSON string."""
853
+ from ._pytree import (
854
+ tree_unflatten as _tree_unflatten,
855
+ treespec_loads as _treespec_loads,
856
+ )
857
+
858
+ orig_treespec = _treespec_loads(serialized)
859
+ dummy_tree = _tree_unflatten([0] * orig_treespec.num_leaves, orig_treespec)
860
+ treespec = tree_structure(dummy_tree)
861
+ return treespec
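A round-trip sketch: the C++ treespec is converted to the Python pytree representation for serialization and converted back on load:
spec = tree_structure({"a": [1, 2], "b": 3})
payload = treespec_dumps(spec)          # JSON string, same wire format as torch.utils._pytree
assert treespec_loads(payload) == spec  # reconstructs an equivalent TreeSpec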
862
+
863
+
864
+ class _DummyLeaf:
865
+ def __repr__(self) -> str:
866
+ return "*"
867
+
868
+
869
+ def treespec_pprint(treespec: TreeSpec) -> str:
870
+ dummy_tree = tree_unflatten(
871
+ [_DummyLeaf() for _ in range(treespec.num_leaves)],
872
+ treespec,
873
+ )
874
+ return repr(dummy_tree)
875
+
876
+
877
+ class LeafSpecMeta(type(TreeSpec)): # type: ignore[misc]
878
+ def __instancecheck__(self, instance: object) -> bool:
879
+ return isinstance(instance, TreeSpec) and instance.is_leaf()
880
+
881
+
882
+ class LeafSpec(TreeSpec, metaclass=LeafSpecMeta):
883
+ def __new__(cls) -> "LeafSpec":
884
+ return optree.treespec_leaf(none_is_leaf=True) # type: ignore[return-value]
885
+
886
+
887
+ def tree_flatten_with_path(
888
+ tree: PyTree,
889
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
890
+ ) -> Tuple[List[Tuple[KeyPath, Any]], TreeSpec]:
891
+ """Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path.
892
+
893
+ Args:
894
+ tree: a pytree to flatten. If it contains a custom type, that type must be
895
+ registered with an appropriate `tree_flatten_with_path_fn` when registered
896
+ with :func:`register_pytree_node`.
897
+ is_leaf: An extra leaf predicate function that will be called at each
898
+ flattening step. The function should have a single argument with signature
899
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
900
+ as a leaf. Otherwise, the default pytree registry will be used to determine whether a node is a
901
+ leaf or not. If the function is not specified, the default pytree registry will be used.
902
+ Returns:
903
+ A tuple where the first element is a list of (key path, leaf) pairs, and the
904
+ second element is a :class:`TreeSpec` representing the structure of the flattened
905
+ tree.
906
+ """
907
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
908
+
909
+
910
+ def tree_leaves_with_path(
911
+ tree: PyTree,
912
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
913
+ ) -> List[Tuple[KeyPath, Any]]:
914
+ """Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path.
915
+
916
+ Args:
917
+ tree: a pytree. If it contains a custom type, that type must be
918
+ registered with an appropriate `tree_flatten_with_path_fn` when registered
919
+ with :func:`register_pytree_node`.
920
+ is_leaf: An extra leaf predicate function that will be called at each
921
+ flattening step. The function should have a single argument with signature
922
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
923
+ as a leaf. Otherwise, the default pytree registry will be used to determine whether a node is a
924
+ leaf or not. If the function is not specified, the default pytree registry will be used.
925
+ Returns:
926
+ A list of (key path, leaf) pairs.
927
+ """
928
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
929
+
930
+
931
+ def tree_map_with_path(
932
+ func: Callable[..., Any],
933
+ tree: PyTree,
934
+ *rests: PyTree,
935
+ is_leaf: Optional[Callable[[PyTree], bool]] = None,
936
+ ) -> PyTree:
937
+ """Like :func:`tree_map`, but the provided callable takes an additional key path argument.
938
+
939
+ Args:
940
+ func: A function that takes ``2 + len(rests)`` arguments, to be applied at the
941
+ corresponding leaves of the pytrees. The first positional argument
942
+ to ``func`` is the key path of the leaf in question. The second
943
+ positional argument is the value of the leaf.
944
+ tree: A pytree to be mapped over, with each leaf providing the first positional
945
+ argument to function ``func``.
946
+ rests: A tuple of pytrees, each of which has the same structure as
947
+ ``tree`` or has ``tree`` as a prefix.
948
+ is_leaf: An extra leaf predicate function that will be called at each
949
+ flattening step. The function should have a single argument with signature
950
+ ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree will be treated
951
+ as a leaf. Otherwise, the default pytree registry will be used to determine whether a node is a
952
+ leaf or not. If the function is not specified, the default pytree registry will be used.
953
+
954
+ Returns:
955
+ A new pytree with the same structure as ``tree`` but with the value at each leaf given by
956
+ ``func(keypath, x, *xs)`` where ``keypath`` is the key path at the
957
+ corresponding leaf in ``tree``, ``x`` is the value at that leaf, and
958
+ ``xs`` is the tuple of values at corresponding nodes in ``rests``.
959
+ """
960
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
961
+
962
+
963
+ def keystr(kp: KeyPath) -> str:
964
+ """Given a key path, return a pretty-printed representation."""
965
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
966
+
967
+
968
+ def key_get(obj: Any, kp: KeyPath) -> Any:
969
+ """Given an object and a key path, return the value at the key path."""
970
+ raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.")
llmeval-env/lib/python3.10/site-packages/torch/utils/_import_utils.py ADDED
@@ -0,0 +1,42 @@
1
+ import functools
2
+ import importlib.util
3
+
4
+ import torch
5
+
6
+
7
+ def _check_module_exists(name: str) -> bool:
8
+ r"""Returns whether a top-level module with :attr:`name` exists *without*
9
+ importing it. This is generally safer than a try-except block around
10
+ `import X`. It avoids third party libraries breaking assumptions of some of
11
+ our tests, e.g., setting multiprocessing start method when imported
12
+ (see librosa/#747, torchvision/#544).
13
+ """
14
+ try:
15
+ spec = importlib.util.find_spec(name)
16
+ return spec is not None
17
+ except ImportError:
18
+ return False
19
+
20
+
21
+ @functools.lru_cache
22
+ def dill_available():
23
+ return (
24
+ _check_module_exists("dill")
25
+ # dill fails to import under torchdeploy
26
+ and not torch._running_with_deploy()
27
+ )
28
+
29
+
30
+ @functools.lru_cache
31
+ def import_dill():
32
+ if not dill_available():
33
+ return None
34
+
35
+ import dill
36
+
37
+ # XXX: By default, dill writes the Pickler dispatch table to inject its
38
+ # own logic there. This globally affects the behavior of the standard library
39
+ # pickler for any user who transitively depends on this module!
40
+ # Undo this extension to avoid altering the behavior of the pickler globally.
41
+ dill.extend(use_dill=False)
42
+ return dill
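A usage sketch for the helpers above:
assert _check_module_exists("json")             # stdlib module: found without importing it
assert not _check_module_exists("no_such_pkg")  # missing module: False, no ImportError escapes

if dill_available():
    dill = import_dill()   # cached; returns the module with its pickle extension undone
else:
    dill = None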
llmeval-env/lib/python3.10/site-packages/torch/utils/_stats.py ADDED
@@ -0,0 +1,21 @@
1
+ # NOTE! PLEASE KEEP THIS FILE *FREE* OF TORCH DEPS! IT SHOULD BE IMPORTABLE ANYWHERE.
2
+ # IF YOU FEEL AN OVERWHELMING URGE TO ADD A TORCH DEP, MAKE A TRAMPOLINE FILE A LA torch._dynamo.utils
3
+ # AND SCRUB AWAY TORCH NOTIONS THERE.
4
+ import collections
5
+ import functools
6
+ from typing import OrderedDict
7
+
8
+ simple_call_counter: OrderedDict[str, int] = collections.OrderedDict()
9
+
10
+ def count_label(label):
11
+ prev = simple_call_counter.setdefault(label, 0)
12
+ simple_call_counter[label] = prev + 1
13
+
14
+ def count(fn):
15
+ @functools.wraps(fn)
16
+ def wrapper(*args, **kwargs):
17
+ if fn.__qualname__ not in simple_call_counter:
18
+ simple_call_counter[fn.__qualname__] = 0
19
+ simple_call_counter[fn.__qualname__] = simple_call_counter[fn.__qualname__] + 1
20
+ return fn(*args, **kwargs)
21
+ return wrapper
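A usage sketch: ``count`` tallies calls per qualified function name, and ``count_label`` tallies arbitrary labels in the same dict:
@count
def helper():
    return 42

helper()
helper()
count_label("cache_miss")
# simple_call_counter now maps "helper" to 2 and "cache_miss" to 1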
llmeval-env/lib/python3.10/site-packages/torch/utils/_traceback.py ADDED
@@ -0,0 +1,254 @@
1
+ from types import TracebackType
2
+ from typing import List, Optional
3
+ import tempfile
4
+ import traceback
5
+ import contextlib
6
+ import inspect
7
+ import os.path
8
+
9
+ # This file contains utilities for ensuring dynamically compile()'d
10
+ # code fragments display their line numbers in backtraces.
11
+ #
12
+ # The constraints:
13
+ #
14
+ # - We don't have control over the user exception printer (in particular,
15
+ # we cannot assume the linecache trick will work, c.f.
16
+ # https://stackoverflow.com/q/50515651/23845 )
17
+ #
18
+ # - We don't want to create temporary files every time we compile()
19
+ # some code; file creation should happen lazily only at exception
20
+ # time. Arguably, you *should* be willing to write out your
21
+ # generated Python code to file system, but in some situations
22
+ # (esp. library code) it would violate user expectation to write
23
+ # to the file system, so we try to avoid it. In particular, we'd
24
+ # like to keep the files around, so users can open up the files
25
+ # mentioned in the trace; if the file is invisible, we want to
26
+ # avoid clogging up the filesystem.
27
+ #
28
+ # If this is not a constraint for you, there is a substantially simpler
29
+ # way to implement the functionality in this PR: instead of using
30
+ # eval/exec directly, just always write a Python file to filesystem
31
+ # and compile that.
32
+ #
33
+ # - You have control over a context where the compiled code will get
34
+ # executed, so that we can interpose while the stack is unwinding
35
+ # (otherwise, we have no way to interpose on the exception printing
36
+ # process.)
37
+ #
38
+ # There are two things you have to do to make use of the utilities here:
39
+ #
40
+ # - When you compile your source code, you must save its string source
41
+ # in its f_globals under the magic name "__compile_source__"
42
+ #
43
+ # - Before running the compiled code, enter the
44
+ # report_compile_source_on_error() context manager.
45
+
46
+ @contextlib.contextmanager
47
+ def report_compile_source_on_error():
48
+ try:
49
+ yield
50
+ except Exception as exc:
51
+ tb = exc.__traceback__
52
+
53
+ # Walk the traceback, looking for frames that have
54
+ # source attached
55
+ stack = []
56
+ while tb is not None:
57
+ filename = tb.tb_frame.f_code.co_filename
58
+ source = tb.tb_frame.f_globals.get("__compile_source__")
59
+
60
+ if filename == "<string>" and source is not None:
61
+ # What black magic are we doing here? Intuitively, what
62
+ # we would like to do is overwrite the co_filename on any
63
+ # frames that were generated from exec/eval so that they
64
+ # point to a temporary file that has the actual line
65
+ # information, so Python's default error printer can print
66
+ # useful line information on it.
67
+ #
68
+ # Writing out the temporary file is easy. But overwriting
69
+ # co_filename is not! You can't modify the code object
70
+ # associated with a frame. You can, however, reconstruct
71
+ # a traceback with entirely new frames from scratch, so that's
72
+ # what we do. But there's another problem, which is how to
73
+ # make the frame?
74
+ #
75
+ # The black magic is we make a frankenstein frame and code
76
+ # object which resembles the original frame/code enough so
77
+ # that it will print properly under traceback and the default
78
+ # error printer, but IT IS NOT THE ORIGINAL FRAME (you
79
+ # couldn't, e.g., execute its code with different variables
80
+ # and expect it to work.)
81
+
82
+ # Don't delete the temporary file so the user can inspect it
83
+ # TODO: This creates a temporary file for every frame, but we
84
+ # technically only need one per distinct __compile_source__
85
+ with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".py") as f:
86
+ f.write(source)
87
+ # Create a frame. Python doesn't let you construct
88
+ # FrameType directly, so just make one with compile
89
+ frame = tb.tb_frame
90
+ code = compile('__inspect_currentframe()', f.name, 'eval')
91
+ code = code.replace(co_name=frame.f_code.co_name)
92
+ # Python 3.11 only
93
+ if hasattr(frame.f_code, 'co_linetable'):
94
+ # We can't copy ALL of the metadata over, because you
95
+ # can cause Python to segfault this way. What exactly
96
+ # do we need? We need enough information for
97
+ # traceback to be able to print the exception
98
+ # correctly. Code reading Lib/traceback.py reveals
99
+ # that traceback calls code.co_positions() in order to
100
+ # get the augmented line/col numbers. Objects/codeobject.c,
101
+ # specifically _PyCode_InitAddressRange, reveals that
102
+ # this iterator is initialized from co_linetable and
103
+ # co_firstfileno. So copy these we must!
104
+ code = code.replace( # type: ignore[call-arg]
105
+ co_linetable=frame.f_code.co_linetable, # type: ignore[attr-defined]
106
+ co_firstlineno=frame.f_code.co_firstlineno, # type: ignore[attr-defined]
107
+ )
108
+ fake_frame = eval(
109
+ code,
110
+ frame.f_globals,
111
+ {
112
+ **frame.f_locals,
113
+ '__inspect_currentframe': inspect.currentframe
114
+ }
115
+ )
116
+ fake_tb = TracebackType(
117
+ None, fake_frame, tb.tb_lasti, tb.tb_lineno
118
+ )
119
+ stack.append(fake_tb)
120
+ else:
121
+ stack.append(tb)
122
+
123
+ tb = tb.tb_next
124
+
125
+ # Reconstruct the linked list
126
+ tb_next = None
127
+ for tb in reversed(stack):
128
+ tb.tb_next = tb_next
129
+ tb_next = tb
130
+
131
+ raise exc.with_traceback(tb_next) # noqa: TRY200
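A sketch of the two-step usage described in the comment at the top of this file (the error still propagates; only the traceback is rewritten to point at a real temporary ``.py`` file):
src = "def f():\n    raise RuntimeError('boom')\n"
g = {"__compile_source__": src}           # step 1: stash the source in f_globals
exec(compile(src, "<string>", "exec"), g)

with report_compile_source_on_error():    # step 2: run inside the context manager
    g["f"]()                              # RuntimeError, now with usable line numbers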
132
+
133
+ def shorten_filename(fn, *, base=None):
134
+ """Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user."""
135
+ if base is None:
136
+ base = os.path.dirname(os.path.dirname(__file__))
137
+ # Truncate torch/foo.py to foo.py
138
+ try:
139
+ prefix = os.path.commonpath([fn, base])
140
+ except ValueError:
141
+ return fn
142
+ else:
143
+ return fn[len(prefix) + 1:]
144
+
145
+ def format_frame(frame, *, base=None, line=False):
146
+ """
147
+ Format a FrameSummary in a short way, without printing full absolute path or code.
148
+
149
+ The idea is the result fits on a single line.
150
+ """
151
+ extra_line = ""
152
+ if line:
153
+ extra_line = f"{frame.line} # "
154
+ return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}"
155
+
156
+ def format_traceback_short(tb):
157
+ """Format a TracebackType in a short way, printing only the inner-most frame."""
158
+ return format_frame(traceback.extract_tb(tb)[-1])
159
+
160
+ class CapturedTraceback:
161
+ __slots__ = ['tb', 'skip']
162
+
163
+ def __init__(self, tb, skip=0):
164
+ self.tb = tb
165
+ self.skip = skip
166
+
167
+ def cleanup(self):
168
+ self.tb = None
169
+
170
+ def summary(self):
171
+ import torch._C._profiler
172
+
173
+ if self.tb is None:
174
+ # TODO: Maybe indicate that the traceback was elided?
175
+ return traceback.StackSummary()
176
+
177
+ return _extract_symbolized_tb(
178
+ torch._C._profiler.symbolize_tracebacks([self.tb])[0],
179
+ self.skip
180
+ )
181
+
182
+ def __getstate__(self):
183
+ return (None, {
184
+ 'tb': None, # TB is not pickleable
185
+ 'skip': self.skip,
186
+ })
187
+
188
+ @staticmethod
189
+ def extract(*, script=False, cpp=False, skip=0):
190
+ """
191
+ Like traceback.extract_stack(), but faster (approximately 20x faster); it
192
+ is fast enough that you can unconditionally log stacks this way as part of
193
+ normal execution. It returns a torch._C._profiler.CapturedTraceback
194
+ object that must be formatted specially with format_captured_tb.
195
+
196
+ By default, this only reports Python backtraces (like extract_stack). You
197
+ can set the script/cpp kwargs to also turn on TorchScript/C++ trace
198
+ reporting.
199
+ """
200
+ import torch._C._profiler
201
+
202
+ if script or cpp:
203
+ assert skip == 0, "skip with script/cpp NYI"
204
+
205
+ return CapturedTraceback(
206
+ torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp),
207
+ # Elide extract() frame if we don't have script/cpp frames. If
208
+ # we do have those frames, it doesn't work so force zero.
209
+ 0 if script or cpp else skip + 1
210
+ )
211
+
212
+ def format(self):
213
+ """
214
+ Formats a single torch._C._profiler.CapturedTraceback into a list of
215
+ strings equivalent to the output of traceback.format_list. Note that if you
216
+ pass it a CapturedTraceback with C++ traces, it is better not to use this
217
+ function and use the batch formatting API format_captured_tbs to amortize
218
+ the cost of symbolization.
219
+ """
220
+ return traceback.format_list(self.summary())
221
+
222
+ @staticmethod
223
+ def format_all(tbs):
224
+ """
225
+ Bulk version of CapturedTraceback.format. Returns a list of list of strings.
226
+ """
227
+ import torch._C._profiler
228
+
229
+ # Directly populate tracebacks that already have cached summaries
230
+ rs: List[Optional[List[str]]] = []
231
+ delayed_idxs = []
232
+ for i, tb in enumerate(tbs):
233
+ if tb.tb is None:
234
+ rs.append([])
235
+ else:
236
+ rs.append(None)
237
+ delayed_idxs.append(i)
238
+
239
+ stbs = torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])
240
+ for i, stb in zip(delayed_idxs, stbs):
241
+ rs[i] = traceback.format_list(tbs[i].summary())
242
+
243
+ return rs
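A usage sketch (relies on ``torch._C._profiler``, as the methods above do):
tb = CapturedTraceback.extract()
single = tb.format()                                # list of strings, like traceback.format_list
batched = CapturedTraceback.format_all([tb, CapturedTraceback.extract()])  # one symbolization pass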
244
+
245
+
246
+ def _extract_symbolized_tb(tb, skip):
247
+ """
248
+ Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of
249
+ pre-processed stack trace entries.
250
+ """
251
+ stack = traceback.StackSummary()
252
+ for f in reversed(tb[skip:]):
253
+ stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name']))
254
+ return stack
llmeval-env/lib/python3.10/site-packages/torch/utils/backend_registration.py ADDED
@@ -0,0 +1,339 @@
1
+ import torch
2
+ from torch._C import _rename_privateuse1_backend, _get_privateuse1_backend_name
3
+ from typing import List, Optional, Union
4
+
5
+ __all__ = ["rename_privateuse1_backend", "generate_methods_for_privateuse1_backend"]
6
+
7
+ # TODO: Should use `torch._C._get_privateuse1_backend_name()` to get
8
+ # renamed-backend name for `privateuse1`, but the func will cause an
9
+ # error with torch.jit.script, so we use the global variable named
10
+ # `_privateuse1_backend_name`.
11
+ _privateuse1_backend_name = "privateuseone"
12
+
13
+ def rename_privateuse1_backend(backend_name: str) -> None:
14
+ r"""
15
+ Rename the privateuse1 backend device to make it more convenient to use as a device name within PyTorch APIs.
16
+
17
+ The steps are:
18
+
19
+ (1) (In C++) implement kernels for various torch operations, and register them
20
+ to the PrivateUse1 dispatch key.
21
+ (2) (In python) call torch.utils.rename_privateuse1_backend("foo")
22
+
23
+ You can now use "foo" as an ordinary device string in python.
24
+
25
+ Note: this API can only be called once per process. Attempting to change
26
+ the external backend after it's already been set will result in an error.
27
+
28
+ Note(AMP): If you want to support AMP on your device, you can register a custom backend module.
29
+ The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``.
30
+ BackendModule needs to have the following API's:
31
+
32
+ (1) ``get_amp_supported_dtype() -> List[torch.dtype]``
33
+ get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype.
34
+
35
+ (2) ``is_autocast_enabled() -> bool``
36
+ check the AMP is enabled or not on your "foo" device.
37
+
38
+ (3) ``get_autocast_dtype() -> torch.dtype``
39
+ get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the
40
+ default dtype, and the default dtype is ``torch.float16``.
41
+
42
+ (4) ``set_autocast_enabled(bool) -> None``
43
+ enable the AMP or not on your "foo" device.
44
+
45
+ (5) ``set_autocast_dtype(dtype) -> None``
46
+ set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got
47
+ from ``get_amp_supported_dtype``.
48
+
49
+ Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's:
50
+
51
+ (1) ``_is_in_bad_fork() -> bool``
52
+ Return ``True`` if now it is in bad_fork, else return ``False``.
53
+
54
+ (2) ``manual_seed_all(seed: int) -> None``
55
+ Sets the seed for generating random numbers for your devices.
56
+
57
+ (3) ``device_count() -> int``
58
+ Returns the number of "foo"s available.
59
+
60
+ (4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor``
61
+ Returns a list of ByteTensor representing the random number states of all devices.
62
+
63
+ (5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None``
64
+ Sets the random number generator state of the specified "foo" device.
65
+
66
+ And there are some common funcs:
67
+
68
+ (1) ``is_available() -> bool``
69
+ Returns a bool indicating if "foo" is currently available.
70
+
71
+ (2) ``current_device() -> int``
72
+ Returns the index of a currently selected device.
73
+
74
+ For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend
75
+ For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example
76
+
77
+ Example::
78
+
79
+ >>> # xdoctest: +SKIP("failing")
80
+ >>> torch.utils.rename_privateuse1_backend("foo")
81
+ # This will work, assuming that you've implemented the right C++ kernels
82
+ # to implement torch.ones.
83
+ >>> a = torch.ones(2, device="foo")
84
+
85
+ """
86
+ _rename_privateuse1_backend(backend_name)
87
+ global _privateuse1_backend_name
88
+ _privateuse1_backend_name = backend_name
89
+
90
+ def _check_register_once(module, attr):
91
+ if hasattr(module, attr):
92
+ raise RuntimeError(f"The custom device module of {module} has already been registered with {attr}")
93
+
94
+
95
+ def _normalization_device(custom_backend_name: str, device: Optional[Union[int, str, torch.device]] = None) -> int:
96
+ def _get_current_device_index():
97
+ _get_device_index = "current_device"
98
+ if hasattr(torch, custom_backend_name) and \
99
+ hasattr(getattr(torch, custom_backend_name), _get_device_index):
100
+ return getattr(getattr(torch, custom_backend_name), _get_device_index)()
101
+ else:
102
+ # The default device index is 0.
103
+ return 0
104
+
105
+ if device is None:
106
+ return _get_current_device_index()
107
+ # if isinstance(device, str), this means that the parameter passed in is in the string format "foo:0"
108
+ # convert str object to torch.device object, and then process it uniformly
109
+ elif isinstance(device, str):
110
+ device = torch.device(device)
111
+
112
+ # variable device can only be torch.device type or int type
113
+ if isinstance(device, torch.device):
114
+ if device.type != custom_backend_name:
115
+ raise RuntimeError(f"Invalid device, must be {custom_backend_name} device")
116
+ elif device.index is None:
117
+ device_idx = _get_current_device_index()
118
+ else:
119
+ device_idx = device.index
120
+ # if isinstance(device, int), we can take the index number directly
121
+ else:
122
+ device_idx = device
123
+ return device_idx
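A sketch of the inputs ``_normalization_device`` accepts, assuming the backend has already been renamed to the hypothetical name "foo" and a device module exposing ``current_device`` has been registered:
_normalization_device("foo", 1)                    # int index is returned directly -> 1
_normalization_device("foo", "foo:0")              # "foo:0" is parsed via torch.device -> 0
_normalization_device("foo", torch.device("foo"))  # no index -> falls back to current_device()
_normalization_device("foo", None)                 # None also falls back to current_device()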
124
+
125
+
126
+ def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
127
+ @property # type: ignore[misc]
128
+ def wrap_tensor_backend(self: torch.Tensor) -> bool:
129
+ return self.device.type == custom_backend_name
130
+
131
+ _check_register_once(torch.Tensor, f'is_{custom_backend_name}')
132
+ setattr(torch.Tensor, f'is_{custom_backend_name}', wrap_tensor_backend)
133
+
134
+ def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]] = None, non_blocking=False,
135
+ **kwargs) -> torch.Tensor:
136
+ r"""Perform Tensor device conversion. Call the to operator implementation.
137
+
138
+ .. note::
139
+ If the ``self`` Tensor already
140
+ has the correct :class:`torch.device`, then ``self`` is returned.
141
+ Otherwise, the returned tensor is a copy of ``self`` with the desired :class:`torch.device`.
142
+
143
+ Args:
144
+ device (int, optional): if specified, all parameters will be copied to that device
145
+ non_blocking (bool): If ``True`` and the source is in pinned memory,
146
+ the copy will be asynchronous with respect to the host. Otherwise,
147
+ the argument has no effect.
148
+ **kwargs (dict): For compatibility, may contain the key ``memory_format`` argument.
149
+ """
150
+ device_idx = _normalization_device(custom_backend_name, device)
151
+ return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs)
152
+
153
+ _check_register_once(torch.Tensor, custom_backend_name)
154
+ setattr(torch.Tensor, custom_backend_name, wrap_tensor_to)
155
+
156
+
157
+ def _generate_module_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
158
+ # Generate Module attributes and methods depends on Tensor methods,
159
+ # so we need to check whether Tensor methods is already registered.
160
+ if not hasattr(torch.Tensor, custom_backend_name):
161
+ raise RuntimeError(
162
+ f"Can not automatically generate {custom_backend_name}() method for torch.nn.Module. "
163
+ f"Because torch.Tensor doesn't have the method {custom_backend_name}(). "
164
+ f"For this error, you can try setting for_tensor=True.")
165
+
166
+ def wrap_module_to(self: torch.nn.modules.module.T,
167
+ device: Optional[Union[int, torch.device]] = None) -> torch.nn.modules.module.T:
168
+ r"""Move all model parameters and buffers to the custom device.
169
+
170
+ This also makes associated parameters and buffers different objects. So
171
+ it should be called before constructing optimizer if the module will
172
+ live on device while being optimized.
173
+
174
+ .. note::
175
+ This method modifies the module in-place.
176
+
177
+ Args:
178
+ device (int, optional): if specified, all parameters will be copied to that device
179
+ """
180
+ return self._apply(lambda t: getattr(t, custom_backend_name)(device))
181
+
182
+ _check_register_once(torch.nn.Module, custom_backend_name)
183
+ setattr(torch.nn.Module, custom_backend_name, wrap_module_to)
184
+
185
+
186
+ def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str,
187
+ unsupported_dtype: Optional[List[torch.dtype]] = None) -> None:
188
+ # Attribute is registered in the _StorageBase class
189
+ # and UntypedStorage obtains through inheritance.
190
+ @property # type: ignore[misc]
191
+ def wrap_storage_backend(self: torch.storage._StorageBase) -> bool:
192
+ r"""Return whether the underlying storage lives on the custom backend device."""
193
+ return self.device.type == custom_backend_name
194
+
195
+ _check_register_once(torch.storage._StorageBase, f'is_{custom_backend_name}')
196
+ setattr(torch.storage._StorageBase, f'is_{custom_backend_name}', wrap_storage_backend)
197
+
198
+ def wrap_storage_to(self, device=None, non_blocking=False):
199
+ r"""Return a copy of this object in custom device memory.
200
+
201
+ If this object is already in device memory and on the correct device, then
202
+ no copy is performed and the original object is returned.
203
+
204
+ Args:
205
+ device (int): The destination device id. Defaults to the current device.
206
+ non_blocking (bool): If ``True`` and the source is in pinned memory,
207
+ the copy will be asynchronous with respect to the host. Otherwise,
208
+ the argument has no effect.
209
+ """
210
+ # Checks on the storage device and the storage type would normally go here,
211
+ # but they depend on the backend extension, so they are omitted from the automatically generated method for now.
212
+ device_idx = _normalization_device(custom_backend_name, device)
213
+
214
+ if getattr(self, f'is_{custom_backend_name}'):
215
+ # storage is already on the expected device.
216
+ if self.get_device() == device_idx:
217
+ return self
218
+ # For sparse storage, custom backends need to extend the implementation themselves.
219
+ if self.is_sparse:
220
+ raise RuntimeError(f"Can not support a sparse storage move to {custom_backend_name} backend")
221
+ # create untyped_storage and copy data
222
+ untyped_storage = torch.UntypedStorage(
223
+ self.size(), device=torch.device(f'{custom_backend_name}:{device_idx}')
224
+ )
225
+ untyped_storage.copy_(self, non_blocking)
226
+ return untyped_storage
227
+
228
+ _check_register_once(torch.storage._StorageBase, custom_backend_name)
229
+ setattr(torch.storage._StorageBase, custom_backend_name, wrap_storage_to)
230
+
231
+ # Register the corresponding attribute for the TypedStorage class.
232
+ # When the TypedStorage class is removed, the registration is also removed.
233
+
234
+ @property # type: ignore[misc]
235
+ def wrap_typed_storage_backend(self: torch.storage.TypedStorage) -> bool:
236
+ torch.storage._warn_typed_storage_removal()
237
+ return self._untyped_storage.device.type == custom_backend_name
238
+
239
+ _check_register_once(torch.TypedStorage, f'is_{custom_backend_name}')
240
+ setattr(torch.storage.TypedStorage, f'is_{custom_backend_name}', wrap_typed_storage_backend)
241
+
242
+ def wrap_typed_storage_to(self: torch.storage.TypedStorage,
243
+ device=None, non_blocking=False, **kwargs) -> torch.storage.TypedStorage:
244
+ torch.storage._warn_typed_storage_removal()
245
+ if unsupported_dtype and self.dtype in unsupported_dtype:
246
+ raise RuntimeError(f"Cannot create {custom_backend_name} storage "
247
+ f"as {self.dtype} dtype is not supported by this backend")
248
+ custom_backend_storage: torch.UntypedStorage = getattr(
249
+ self._untyped_storage, custom_backend_name)(device, non_blocking, **kwargs)
250
+ return self._new_wrapped_storage(custom_backend_storage)
251
+
252
+ _check_register_once(torch.TypedStorage, custom_backend_name)
253
+ setattr(torch.TypedStorage, custom_backend_name, wrap_typed_storage_to)
254
+
255
+
256
+ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module: bool = True,
257
+ for_storage: bool = False,
258
+ unsupported_dtype: Optional[List[torch.dtype]] = None) -> None:
259
+ r"""
260
+ Automatically generate attributes and methods for the custom backend after renaming the privateuse1 backend.
261
+
262
+ In the default scenario, storage-related methods will not be generated automatically.
263
+
264
+ Once you have implemented kernels for various torch operations and registered them to the PrivateUse1 dispatch key,
265
+ and have called the function torch.rename_privateuse1_backend("foo") to rename your backend,
266
+ you can easily register backend-specific methods and attributes by calling this function,
267
+ such as torch.Tensor.foo(), torch.Tensor.is_foo, torch.Storage.foo(), and torch.Storage.is_foo.
268
+
269
+ Note: We recommend you use generic functions (check devices are equal or to(device=)).
270
+ We provide these methods for convenience only and they will be "monkey patched" onto the objects
271
+ and so will not be properly typed. For generated Storage methods, if you need to support sparse data storage,
272
+ you need to extend the implementation yourself.
273
+
274
+ Args:
275
+ for_tensor (bool): whether register related methods for torch.Tensor class.
276
+ for_module (bool): whether register related methods for torch.nn.Module class.
277
+ for_storage (bool): whether register related methods for torch.Storage class.
278
+ unsupported_dtype (List[torch.dtype]): takes effect only when the storage method needs to be generated,
279
+ indicating that the storage does not support the torch.dtype type.
280
+
281
+ Example::
282
+
283
+ >>> # xdoctest: +SKIP("failing")
284
+ >>> torch.utils.rename_privateuse1_backend("foo")
285
+ >>> torch.utils.generate_methods_for_privateuse1_backend()
286
+ # Then automatically generate backend-related attributes and methods.
287
+ >>> a = torch.tensor(2).foo()
288
+ >>> a.is_foo
289
+ >>> hasattr(torch.nn.Module, 'foo')
290
+ """
291
+ custom_backend_name = _get_privateuse1_backend_name()
292
+
293
+ if for_tensor:
294
+ _generate_tensor_methods_for_privateuse1_backend(custom_backend_name)
295
+
296
+ if for_module:
297
+ _generate_module_methods_for_privateuse1_backend(custom_backend_name)
298
+
299
+ if for_storage:
300
+ _generate_storage_methods_for_privateuse1_backend(custom_backend_name, unsupported_dtype)
301
+
302
+ def _get_custom_mod_func(func_name: str):
303
+ r"""
304
+ Return the func named `func_name` defined in the custom device module; raise an error if it is
305
+ not defined. The func is registered with `torch.utils.rename_privateuse1_backend('foo')`
306
+ and `torch._register_device_module('foo', BackendModule)`.
307
+ If the custom device module or the func is not defined, a RuntimeError with a detailed message is raised.
308
+ Args:
309
+ func_name (str): return the callable func named func_name defined in custom device module.
310
+ Example::
311
+ class DummyfooModule:
312
+ @staticmethod
313
+ def is_available():
314
+ return True
315
+ @staticmethod
316
+ def func_name(*args, **kwargs):
317
+ ....
318
+ torch.utils.rename_privateuse1_backend("foo")
319
+ torch._register_device_module("foo", DummyfooModule)
320
+ foo_is_available_func = torch.utils.backend_registration._get_custom_mod_func("is_available")
321
+ if foo_is_available_func:
322
+ foo_is_available = foo_is_available_func()
323
+ func_ = torch.utils.backend_registration._get_custom_mod_func("func_name")
324
+ if func_:
325
+ result = func_(*args, **kwargs)
326
+ Attention: This function is not meant to be used directly by users, which is why
327
+ it is marked as private. It is a convenience function for backend implementers to
328
+ more easily call the hooks into their backend extensions.
329
+ """
330
+ assert isinstance(func_name, str), f"func_name must be `str`, but got `{type(func_name)}`."
331
+ backend_name = _get_privateuse1_backend_name()
332
+ custom_device_mod = getattr(torch, backend_name, None) # type: ignore[arg-type]
333
+ function = getattr(custom_device_mod, func_name, None) # type: ignore[arg-type]
334
+ if custom_device_mod is None or function is None:
335
+ message = f'Try to call torch.{backend_name}.{func_name}. The backend must register a custom backend '
336
+ message += f"module with `torch._register_device_module('{backend_name}', BackendModule)`. And "
337
+ message += f"BackendModule needs to have the following API's:\n `{func_name}(*args, **kwargs)`. \n"
338
+ raise RuntimeError(message)
339
+ return function
llmeval-env/lib/python3.10/site-packages/torch/utils/checkpoint.py ADDED
@@ -0,0 +1,1439 @@
1
+ import contextlib
2
+ import platform
3
+ import uuid
4
+ import warnings
5
+ import weakref
6
+ from collections import defaultdict
7
+ from itertools import count
8
+ from typing import (
9
+ Any,
10
+ Callable,
11
+ ContextManager,
12
+ DefaultDict,
13
+ Dict,
14
+ Iterable,
15
+ List,
16
+ Optional,
17
+ Tuple,
18
+ )
19
+ from weakref import ReferenceType
20
+
21
+ import torch
22
+ import torch.fx.traceback as fx_traceback
23
+ from torch._functorch._aot_autograd.functional_utils import is_fun
24
+ from torch.utils._pytree import tree_map
25
+ from torch.testing._internal.logging_tensor import capture_logs, LoggingTensorMode
26
+ from torch.utils._python_dispatch import TorchDispatchMode
27
+
28
+ __all__ = [
29
+ "checkpoint",
30
+ "checkpoint_sequential",
31
+ "CheckpointError",
32
+ "CheckpointFunction",
33
+ "check_backward_validity",
34
+ "detach_variable",
35
+ "get_device_states",
36
+ "set_device_states",
37
+ "noop_context_fn",
38
+ "set_checkpoint_early_stop",
39
+ "DefaultDeviceType",
40
+ "set_checkpoint_debug_enabled",
41
+ ]
42
+
43
+ _DEFAULT_DETERMINISM_MODE = "default"
44
+
45
+ _checkpoint_debug_enabled: Optional[bool] = None
46
+
47
+
48
+ @contextlib.contextmanager
49
+ def set_checkpoint_debug_enabled(enabled: Optional[bool]):
50
+ """
51
+ Context manager that sets whether checkpoint should print additional debug
52
+ information when running. See the ``debug`` flag for
53
+ :func:`~torch.utils.checkpoint.checkpoint` for more information. Note that
54
+ when set, this context manager overrides the value of ``debug`` passed to
55
+ checkpoint. To defer to the local setting, pass ``None`` to this context.
56
+
57
+ Args:
58
+ enabled (bool): Whether checkpoint should print debug information.
59
+ Default is 'None'.
60
+ """
61
+ global _checkpoint_debug_enabled
62
+ try:
63
+ prev = _checkpoint_debug_enabled
64
+ _checkpoint_debug_enabled = enabled
65
+ yield
66
+ finally:
67
+ _checkpoint_debug_enabled = prev
68
+
69
+
70
+ def detach_variable(inputs: Tuple[Any, ...]) -> Tuple[torch.Tensor, ...]:
71
+ if isinstance(inputs, tuple):
72
+ out = []
73
+ for inp in inputs:
74
+ if not isinstance(inp, torch.Tensor):
75
+ out.append(inp)
76
+ continue
77
+
78
+ x = inp.detach()
79
+ x.requires_grad = inp.requires_grad
80
+ out.append(x)
81
+ return tuple(out)
82
+ else:
83
+ raise RuntimeError(
84
+ "Only tuple of tensors is supported. Got unsupported input type: ",
85
+ type(inputs).__name__,
86
+ )
87
+
88
+
89
+ def check_backward_validity(inputs: Iterable[Any]) -> None:
90
+ if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)):
91
+ warnings.warn(
92
+ "None of the inputs have requires_grad=True. Gradients will be None"
93
+ )
94
+
95
+
96
+ def _get_device_module(device="cuda"):
97
+ device_module = getattr(torch, device)
98
+ return device_module
99
+
100
+
101
+ class DefaultDeviceType:
102
+ r"""
103
+ A class that manages the default device type for checkpointing.
104
+
105
+ If no non-CPU tensors are present, the default device type will
106
+ be used. The default value is 'cuda'. The device type is used in
107
+ the checkpointing process when determining which device states
108
+ to save and restore for recomputation.
109
+ """
110
+
111
+ _default_device_type = "cuda"
112
+
113
+ @staticmethod
114
+ def set_device_type(device: str = "cuda"):
115
+ """
116
+ Set the default device type for checkpointing.
117
+
118
+ Args:
119
+ device (str): The device type to be set as default. Default is 'cuda'.
120
+ """
121
+ DefaultDeviceType._default_device_type = device
122
+
123
+ @staticmethod
124
+ def get_device_type() -> str:
125
+ """
126
+ Get the current default device type for checkpointing.
127
+
128
+ Returns:
129
+ str: The current default device type.
130
+ """
131
+ return DefaultDeviceType._default_device_type
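A small sketch of the setter/getter pair (the "xpu" value is purely illustrative):
assert DefaultDeviceType.get_device_type() == "cuda"   # initial default
DefaultDeviceType.set_device_type("xpu")               # e.g. a CUDA-free deployment
assert DefaultDeviceType.get_device_type() == "xpu"
DefaultDeviceType.set_device_type()                    # restore the default ("cuda")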
132
+
133
+
134
+ def _infer_device_type(*args):
135
+ device_types = list(
136
+ {
137
+ arg.device.type
138
+ for arg in args
139
+ if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu"
140
+ }
141
+ )
142
+ if len(device_types) > 1:
143
+ warnings.warn(
144
+ "Tensor arguments, excluding CPU tensors, are detected on at least two types of devices. "
145
+ "Device state will only be saved for devices of a single device type, and the remaining "
146
+ "devices will be ignored. Consequently, if any checkpointed functions involve randomness, "
147
+ "this may result in incorrect gradients. (Note that if CUDA devices are among the devices "
148
+ "detected, it will be prioritized; otherwise, the first device encountered will be selected.)"
149
+ )
150
+ if len(device_types) == 0:
151
+ return DefaultDeviceType.get_device_type()
152
+ elif "cuda" in device_types:
153
+ return "cuda"
154
+ else:
155
+ return device_types[0]
156
+
157
+
158
+ # We can't know if the run_fn will internally move some args to different devices,
159
+ # which would require logic to preserve rng states for those devices as well.
160
+ # We could paranoically stash and restore ALL the rng states for all visible devices,
161
+ # but that seems very wasteful for most cases. Compromise: Stash the RNG state for
162
+ # the device of all Tensor args.
163
+ #
164
+ # To consider: maybe get_device_states and set_device_states should reside in torch/random.py?
165
+ def get_device_states(*args) -> Tuple[List[int], List[torch.Tensor]]:
166
+ # This will not error out if "arg" is a CPU tensor or a non-tensor type because
167
+ # the conditionals short-circuit.
168
+ fwd_device_ids = list(
169
+ {
170
+ arg.get_device()
171
+ for arg in args
172
+ if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu"
173
+ }
174
+ )
175
+
176
+ fwd_device_states = []
177
+ device_module = _get_device_module(_infer_device_type(*args))
178
+
179
+ for device_id in fwd_device_ids:
180
+ with device_module.device(device_id):
181
+ fwd_device_states.append(device_module.get_rng_state())
182
+
183
+ return fwd_device_ids, fwd_device_states
184
+
185
+
186
+ def set_device_states(devices, states) -> None:
187
+ device_module = _get_device_module(_infer_device_type(*states))
188
+ for device, state in zip(devices, states):
189
+ with device_module.device(device):
190
+ device_module.set_rng_state(state)
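A sketch of the stash/restore round trip these two helpers implement (assumes a CUDA device is available; the saved states are CPU ByteTensors, so ``set_device_states`` falls back to the default device type when inferring the device module):
x = torch.randn(2, device="cuda")
devices, states = get_device_states(x)   # RNG state of every device that holds a tensor arg
a = torch.rand(1, device="cuda")
set_device_states(devices, states)       # rewind the device RNG
b = torch.rand(1, device="cuda")
assert torch.equal(a, b)                 # same random draw after the rewind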
191
+
192
+
193
+ def _get_autocast_kwargs(device="cuda"):
194
+ if device == "cuda":
195
+ device_autocast_kwargs = {
196
+ "enabled": torch.is_autocast_enabled(),
197
+ "dtype": torch.get_autocast_gpu_dtype(),
198
+ "cache_enabled": torch.is_autocast_cache_enabled(),
199
+ }
200
+ elif _supports_autocast(device):
201
+ device_module = _get_device_module(device)
202
+ device_autocast_kwargs = {
203
+ "enabled": device_module.is_autocast_enabled(),
204
+ "dtype": device_module.get_autocast_dtype(),
205
+ "cache_enabled": torch.is_autocast_cache_enabled(),
206
+ }
207
+ else:
208
+ device_autocast_kwargs = None
209
+
210
+ cpu_autocast_kwargs = {
211
+ "enabled": torch.is_autocast_cpu_enabled(),
212
+ "dtype": torch.get_autocast_cpu_dtype(),
213
+ "cache_enabled": torch.is_autocast_cache_enabled(),
214
+ }
215
+
216
+ return device_autocast_kwargs, cpu_autocast_kwargs
217
+
218
+ def _supports_autocast(device):
219
+ device_module = _get_device_module(device)
220
+ return device == "cuda" or (hasattr(device_module, "is_autocast_enabled")
221
+ and hasattr(device_module, "get_autocast_dtype"))
222
+
223
+ class CheckpointFunction(torch.autograd.Function):
224
+ @staticmethod
225
+ def forward(ctx, run_function, preserve_rng_state, *args):
226
+ check_backward_validity(args)
227
+ ctx.run_function = run_function
228
+ ctx.preserve_rng_state = preserve_rng_state
229
+ # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
230
+ ctx.device = _infer_device_type(*args)
231
+ ctx.device_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs(
232
+ ctx.device
233
+ )
234
+ if preserve_rng_state:
235
+ ctx.fwd_cpu_state = torch.get_rng_state()
236
+ # Don't eagerly initialize the cuda context by accident.
237
+ # (If the user intends that the context is initialized later, within their
238
+ # run_function, we SHOULD actually stash the cuda state here. Unfortunately,
239
+ # we have no way to anticipate this will happen before we run the function.)
240
+ ctx.had_device_in_fwd = False
241
+ device_module = _get_device_module(ctx.device)
242
+ if getattr(device_module, "_initialized", False):
243
+ ctx.had_device_in_fwd = True
244
+ ctx.fwd_devices, ctx.fwd_device_states = get_device_states(*args)
245
+
246
+ # Save non-tensor inputs in ctx, keep a placeholder None for tensors
247
+ # to be filled out during the backward.
248
+ ctx.inputs = []
249
+ ctx.tensor_indices = []
250
+ tensor_inputs = []
251
+ for i, arg in enumerate(args):
252
+ if torch.is_tensor(arg):
253
+ tensor_inputs.append(arg)
254
+ ctx.tensor_indices.append(i)
255
+ ctx.inputs.append(None)
256
+ else:
257
+ ctx.inputs.append(arg)
258
+
259
+ ctx.save_for_backward(*tensor_inputs)
260
+
261
+ with torch.no_grad():
262
+ outputs = run_function(*args)
263
+ return outputs
264
+
265
+ @staticmethod
266
+ def backward(ctx, *args):
267
+ if not torch.autograd._is_checkpoint_valid():
268
+ raise RuntimeError(
269
+ "Checkpointing is not compatible with .grad() or when an `inputs` parameter"
270
+ " is passed to .backward(). Please use .backward() and do not pass its `inputs`"
271
+ " argument."
272
+ )
273
+ # Copy the list to avoid modifying original list.
274
+ inputs = list(ctx.inputs)
275
+ tensor_indices = ctx.tensor_indices
276
+ tensors = ctx.saved_tensors
277
+ device_module = _get_device_module(ctx.device)
278
+
279
+ # Fill in inputs with appropriate saved tensors.
280
+ for i, idx in enumerate(tensor_indices):
281
+ inputs[idx] = tensors[i]
282
+
283
+ # Stash the surrounding rng state, and mimic the state that was
284
+ # present at this time during forward. Restore the surrounding state
285
+ # when we're done.
286
+ rng_devices = []
287
+ if ctx.preserve_rng_state and ctx.had_device_in_fwd:
288
+ rng_devices = ctx.fwd_devices
289
+ with torch.random.fork_rng(
290
+ devices=rng_devices, enabled=ctx.preserve_rng_state, device_type=ctx.device
291
+ ):
292
+ if ctx.preserve_rng_state:
293
+ torch.set_rng_state(ctx.fwd_cpu_state)
294
+ if ctx.had_device_in_fwd:
295
+ set_device_states(ctx.fwd_devices, ctx.fwd_device_states)
296
+ detached_inputs = detach_variable(tuple(inputs))
297
+
298
+ device_autocast_ctx = device_module.amp.autocast(
299
+ **ctx.device_autocast_kwargs
300
+ ) if _supports_autocast(ctx.device) else contextlib.nullcontext()
301
+ with torch.enable_grad(), device_autocast_ctx, \
302
+ torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):
303
+ outputs = ctx.run_function(*detached_inputs)
304
+
305
+ if isinstance(outputs, torch.Tensor):
306
+ outputs = (outputs,)
307
+
308
+ # run backward() with only tensor that requires grad
309
+ outputs_with_grad = []
310
+ args_with_grad = []
311
+ for i in range(len(outputs)):
312
+ if torch.is_tensor(outputs[i]) and outputs[i].requires_grad:
313
+ outputs_with_grad.append(outputs[i])
314
+ args_with_grad.append(args[i])
315
+ if len(outputs_with_grad) == 0:
316
+ raise RuntimeError(
317
+ "none of the outputs has requires_grad=True,"
318
+ " this checkpoint() is not necessary"
319
+ )
320
+ torch.autograd.backward(outputs_with_grad, args_with_grad)
321
+ grads = tuple(
322
+ inp.grad if isinstance(inp, torch.Tensor) else None
323
+ for inp in detached_inputs
324
+ )
325
+
326
+ return (None, None) + grads
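A rough sketch of how this autograd Function is driven (an internal detail; user code should go through ``torch.utils.checkpoint.checkpoint`` defined below rather than calling ``apply`` directly):
def block(x):
    return x.sin().cos()

x = torch.randn(4, requires_grad=True)
y = CheckpointFunction.apply(block, True, x)  # (run_function, preserve_rng_state, *args)
y.sum().backward()                            # block is recomputed here, under enable_grad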
327
+
328
+
329
+ def noop_context_fn():
330
+ return contextlib.nullcontext(), contextlib.nullcontext()
331
+
332
+ # TorchDynamo does not step inside utils.checkpoint function. The flow
333
+ # looks like this:
334
+ # 1) TorchDynamo tries to wrap utils.checkpoint in a HigherOrderOp by
335
+ # speculatively checking if the forward function is safe to trace.
336
+ # 2) If yes, then Dynamo-generated Fx graph has the wrapped higher
337
+ # order op. As a result, TorchDynamo does not look inside utils.checkpoint.
338
+ # 3) If not, then TorchDynamo falls back to eager by performing a graph
339
+ # break. And here, the following disable wrapper ensures that
340
+ # TorchDynamo does not trigger again on the frames created by
341
+ # utils.checkpoint innards.
342
+ @torch._disable_dynamo
343
+ def checkpoint(
344
+ function,
345
+ *args,
346
+ use_reentrant: Optional[bool] = None,
347
+ context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn,
348
+ determinism_check: str = _DEFAULT_DETERMINISM_MODE,
349
+ debug: bool = False,
350
+ **kwargs
351
+ ):
352
+ r"""Checkpoint a model or part of the model.
353
+
354
+ Activation checkpointing is a technique that trades compute for memory.
355
+ Instead of keeping tensors needed for backward alive until they are used in
356
+ gradient computation during backward, forward computation in checkpointed
357
+ regions omits saving tensors for backward and recomputes them during the
358
+ backward pass. Activation checkpointing can be applied to any part of a
359
+ model.
360
+
361
+ There are currently two checkpointing implementations available, determined
362
+ by the :attr:`use_reentrant` parameter. It is recommended that you use
363
+ ``use_reentrant=False``. Please refer to the note below for a discussion of
364
+ their differences.
365
+
366
+ .. warning::
367
+
368
+ If the :attr:`function` invocation during the backward pass differs
369
+ from the forward pass, e.g., due to a global variable, the checkpointed
370
+ version may not be equivalent, potentially causing an
371
+ error to be raised or leading to silently incorrect gradients.
372
+
373
+ .. warning::
374
+
375
+ The ``use_reentrant`` parameter should be passed explicitly. In version
376
+ 2.4 we will raise an exception if ``use_reentrant`` is not passed.
377
+ If you are using the ``use_reentrant=True`` variant, please refer to the
378
+ note below for important considerations and potential limitations.
379
+
380
+ .. note::
381
+
382
+ The reentrant variant of checkpoint (``use_reentrant=True``) and
383
+ the non-reentrant variant of checkpoint (``use_reentrant=False``)
384
+ differ in the following ways:
385
+
386
+ * Non-reentrant checkpoint stops recomputation as soon as all needed
387
+ intermediate activations have been recomputed. This feature is enabled
388
+ by default, but can be disabled with :func:`set_checkpoint_early_stop`.
389
+ Reentrant checkpoint always recomputes :attr:`function` in its
390
+ entirety during the backward pass.
391
+
392
+ * The reentrant variant does not record the autograd graph during the
393
+ forward pass, as it runs with the forward pass under
394
+ :func:`torch.no_grad`. The non-reentrant version does record the
395
+ autograd graph, allowing one to perform backward on the graph within
396
+ checkpointed regions.
397
+
398
+ * The reentrant checkpoint only supports the
399
+ :func:`torch.autograd.backward` API for the backward pass without its
400
+ `inputs` argument, while the non-reentrant version supports all ways
401
+ of performing the backward pass.
402
+
403
+ * At least one input and output must have ``requires_grad=True`` for the
404
+ reentrant variant. If this condition is unmet, the checkpointed part
405
+ of the model will not have gradients. The non-reentrant version does
406
+ not have this requirement.
407
+
408
+ * The reentrant version does not consider tensors in nested structures
409
+ (e.g., custom objects, lists, dicts, etc) as participating in
410
+ autograd, while the non-reentrant version does.
411
+
412
+ * The reentrant checkpoint does not support checkpointed regions with
413
+ detached tensors from the computational graph, whereas the
414
+ non-reentrant version does. For the reentrant variant, if the
415
+ checkpointed segment contains tensors detached using ``detach()`` or
416
+ with :func:`torch.no_grad`, the backward pass will raise an error.
417
+ This is because ``checkpoint`` makes all the outputs require gradients
418
+ and this causes issues when a tensor is defined to have no gradient in
419
+ the model. To avoid this, detach the tensors outside of the
420
+ ``checkpoint`` function.
421
+
422
+ Args:
423
+ function: describes what to run in the forward pass of the model or
424
+ part of the model. It should also know how to handle the inputs
425
+ passed as the tuple. For example, in LSTM, if user passes
426
+ ``(activation, hidden)``, :attr:`function` should correctly use the
427
+ first input as ``activation`` and the second input as ``hidden``
428
+ preserve_rng_state(bool, optional): If ``False``, omit stashing and restoring
429
+ the RNG state during each checkpoint. Note that under torch.compile,
430
+ this flag doesn't take effect and we always preserve RNG state.
431
+ Default: ``True``
432
+ use_reentrant(bool):
433
+ specify whether to use the activation checkpoint variant that
434
+ requires reentrant autograd. This parameter should be passed
435
+ explicitly. In version 2.4 we will raise an exception if
436
+ ``use_reentrant`` is not passed. If ``use_reentrant=False``,
437
+ ``checkpoint`` will use an implementation that does not require
438
+ reentrant autograd. This allows ``checkpoint`` to support additional
439
+ functionality, such as working as expected with
440
+ ``torch.autograd.grad`` and support for keyword arguments input into
441
+ the checkpointed function.
442
+ context_fn(Callable, optional): A callable returning a tuple of two
443
+ context managers. The function and its recomputation will be run
444
+ under the first and second context managers respectively.
445
+ This argument is only supported if ``use_reentrant=False``.
446
+ determinism_check(str, optional): A string specifying the determinism
447
+ check to perform. By default it is set to ``"default"`` which
448
+ compares the shapes, dtypes, and devices of the recomputed tensors
449
+ against those of the saved tensors. To turn off this check, specify
450
+ ``"none"``. Currently these are the only two supported values.
451
+ Please open an issue if you would like to see more determinism
452
+ checks. This argument is only supported if ``use_reentrant=False``,
453
+ if ``use_reentrant=True``, the determinism check is always disabled.
454
+ debug(bool, optional): If ``True``, error messages will also include
455
+ a trace of the operators ran during the original forward computation
456
+ as well as the recomputation. This argument is only supported if
457
+ ``use_reentrant=False``.
458
+ args: tuple containing inputs to the :attr:`function`
459
+
460
+ Returns:
461
+ Output of running :attr:`function` on :attr:`*args`
462
+ """
463
+ if use_reentrant is None:
464
+ warnings.warn(
465
+ "torch.utils.checkpoint: the use_reentrant parameter should be "
466
+ "passed explicitly. In version 2.4 we will raise an exception "
467
+ "if use_reentrant is not passed. use_reentrant=False is "
468
+ "recommended, but if you need to preserve the current default "
469
+ "behavior, you can pass use_reentrant=True. Refer to docs for more "
470
+ "details on the differences between the two variants."
471
+ )
472
+ use_reentrant = True
473
+
474
+ # Hack to mix *args with **kwargs in a python 2.7-compliant way
475
+ preserve = kwargs.pop("preserve_rng_state", True)
476
+ if kwargs and use_reentrant:
477
+ raise ValueError(
478
+ "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
479
+ )
480
+
481
+ if use_reentrant:
482
+ if context_fn is not noop_context_fn or debug is not False:
483
+ raise ValueError(
484
+ "Passing `context_fn` or `debug` is only supported when "
485
+ "use_reentrant=False."
486
+ )
487
+ return CheckpointFunction.apply(function, preserve, *args)
488
+ else:
489
+ gen = _checkpoint_without_reentrant_generator(
490
+ function, preserve, context_fn, determinism_check, debug, *args, **kwargs
491
+ )
492
+ # Runs pre-forward logic
493
+ next(gen)
494
+ ret = function(*args, **kwargs)
495
+ # Runs post-forward logic
496
+ try:
497
+ next(gen)
498
+ except StopIteration:
499
+ return ret
500
+
501
+
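+ # A minimal usage sketch (assumed example; the module, sizes, and tensor
+ # names are invented for illustration): checkpointing a small block with the
+ # recommended non-reentrant variant.
import torch
import torch.nn as nn

def _checkpoint_usage_sketch():
    block = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16))
    x = torch.randn(4, 16, requires_grad=True)
    # Activations inside `block` are recomputed during backward instead of stored.
    out = checkpoint(block, x, use_reentrant=False)
    out.sum().backward()
    return x.grad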
502
+ def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs):
503
+ r"""Checkpoint a sequential model to save memory.
504
+
505
+ Sequential models execute a list of modules/functions in order
506
+ (sequentially). Therefore, we can divide such a model in various segments
507
+ and checkpoint each segment. All segments except the last will not store
508
+ the intermediate activations. The inputs of each checkpointed segment will
509
+ be saved for re-running the segment in the backward pass.
510
+
511
+ .. warning::
512
+ The ``use_reentrant`` parameter should be passed explicitly. In version
513
+ 2.4 we will raise an exception if ``use_reentrant`` is not passed.
514
+ If you are using the ``use_reentrant=True`` variant, please see
515
+ :func:`~torch.utils.checkpoint.checkpoint` for
516
+ the important considerations and limitations of this variant. It is
517
+ recommended that you use ``use_reentrant=False``.
518
+
519
+ .. warning::
520
+ Since PyTorch 1.4, it allows only one Tensor as the input and
521
+ intermediate outputs, just like :class:`torch.nn.Sequential`.
522
+
523
+ Args:
524
+ functions: A :class:`torch.nn.Sequential` or the list of modules or
525
+ functions (comprising the model) to run sequentially.
526
+ segments: Number of chunks to create in the model
527
+ input: A Tensor that is input to :attr:`functions`
528
+ preserve_rng_state(bool, optional): If ``False``, omit stashing and restoring
529
+ the RNG state during each checkpoint.
530
+ Default: ``True``
531
+ use_reentrant(bool):
532
+ specify whether to use the activation checkpoint variant that
533
+ requires reentrant autograd. This parameter should be passed
534
+ explicitly. In version 2.4 we will raise an exception if
535
+ ``use_reentrant`` is not passed. If ``use_reentrant=False``,
536
+ ``checkpoint`` will use an implementation that does not require
537
+ reentrant autograd. This allows ``checkpoint`` to support additional
538
+ functionality, such as working as expected with
539
+ ``torch.autograd.grad`` and support for keyword arguments input into
540
+ the checkpointed function.
541
+
542
+ Returns:
543
+ Output of running :attr:`functions` sequentially on :attr:`*inputs`
544
+
545
+ Example:
546
+ >>> # xdoctest: +SKIP("stub")
547
+ >>> model = nn.Sequential(...)
548
+ >>> input_var = checkpoint_sequential(model, chunks, input_var)
549
+ """
550
+ if use_reentrant is None:
551
+ warnings.warn(
552
+ "torch.utils.checkpoint.checkpoint_sequential: the use_reentrant "
553
+ "parameter should be passed explicitly. "
554
+ "In version 2.4 we will raise an exception if use_reentrant "
555
+ "is not passed. use_reentrant=False is "
556
+ "recommended, but if you need to preserve the current default "
557
+ "behavior, you can pass use_reentrant=True. Refer to docs for more "
558
+ "details on the differences between the two variants."
559
+ )
560
+ use_reentrant = True
561
+
562
+ # Hack for keyword-only parameter in a python 2.7-compliant way
563
+ preserve = kwargs.pop("preserve_rng_state", True)
564
+ if kwargs:
565
+ raise ValueError(
566
+ "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
567
+ )
568
+
569
+ def run_function(start, end, functions):
570
+ def forward(input):
571
+ for j in range(start, end + 1):
572
+ input = functions[j](input)
573
+ return input
574
+
575
+ return forward
576
+
577
+ if isinstance(functions, torch.nn.Sequential):
578
+ functions = list(functions.children())
579
+
580
+ segment_size = len(functions) // segments
581
+ # the last chunk has to be non-volatile
582
+ end = -1
583
+ for start in range(0, segment_size * (segments - 1), segment_size):
584
+ end = start + segment_size - 1
585
+ input = checkpoint(
586
+ run_function(start, end, functions),
587
+ input,
588
+ use_reentrant=use_reentrant,
589
+ preserve_rng_state=preserve,
590
+ )
591
+ return run_function(end + 1, len(functions) - 1, functions)(input)
592
+
593
+
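+ # A minimal usage sketch (assumed example; layer count and sizes are
+ # arbitrary illustration values): splitting a deep nn.Sequential into two
+ # checkpointed segments.
import torch
import torch.nn as nn

def _checkpoint_sequential_sketch():
    model = nn.Sequential(*[nn.Linear(32, 32) for _ in range(8)])
    inp = torch.randn(2, 32, requires_grad=True)
    # Each segment is re-run during backward; only segment inputs are saved.
    out = checkpoint_sequential(model, 2, inp, use_reentrant=False)
    out.mean().backward()
    return inp.grad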
594
+ def _internal_assert(cond):
595
+ if not cond:
596
+ raise AssertionError(
597
+ "Something went unexpectedly wrong in activation checkpoint. "
598
+ "Please report this bug by filing an issue to PyTorch."
599
+ )
600
+
601
+
602
+ # NOTE [ Nestable Checkpoint ]
603
+ #
604
+ # The semantics of nested checkpoint can be defined by two basic rules.
605
+ # Following the two rules leads to an important implication that is central
606
+ # to motivating the design.
607
+ #
608
+ # Rule 1. Saved tensors are managed by inner-most checkpoint only and hidden
609
+ # from any outer layers of checkpoint.
610
+ #
611
+ # Rule 2. The inputs of inner checkpoints are treated as tensors saved to its
612
+ # parent checkpoint.
613
+ #
614
+ # Implication: To recompute any given saved tensor, we need to recompute all of
615
+ # the checkpoints wrapping it.
616
+ #
617
+ # Why is this implied? To unpack a saved tensor X during backward we need to
618
+ # recompute the inner-most checkpoint (#1), and in order to recompute that
619
+ # checkpoint I need to have its inputs, which are managed by that checkpoint's
620
+ # parent (#2), which thus also needs to be recomputed first. Continue this line
621
+ # of reasoning and we realize that in order to unpack X, all checkpoints that
622
+ # were active at the time X was saved need to be recomputed. (unless we have
623
+ # already done so in that backward for some other saved tensor).
624
+ #
625
+ # In practice, we use a noop autograd Function to save inputs as saved tensors.
626
+ # During unpack calling ctx.saved_tensor triggers the parent checkpoint to
627
+ # recompute.
628
+ #
629
+ # Rule 3. We should start recomputation as if there are no checkpoints currently
630
+ # active. Checkpoints encountered during recomputation are still
631
+ # respected.
632
+ #
633
+ # When we start recomputation, we push the saved variable hook meant for
634
+ # recomputation on the stack. See examples in Rule 6 for more context.
635
+ #
636
+ # * * * *
637
+ #
638
+ # Beyond the basic semantics specific to nested checkpoint, we impose several
639
+ # more constraints that may apply to checkpointing in general.
640
+ #
641
+ # Rule 4. Lifetime of recomputed tensors
642
+ #
643
+ # Recomputed tensors are considered specific to particular invocations
644
+ # of backward and are always cleared immediately as they are unpacked
645
+ # Particularly, we require this to happen even if retain_graph=True.
646
+ #
647
+ # [ Implementation details of Rule 4 ]
648
+ #
649
+ # If we were okay with recomputed tensors staying alive after backward is run
650
+ # with retain_graph=True, we would store recomputed variables as the values of a
651
+ # WeakKeyDictionary and pack strong references to the keys, so that as we
652
+ # backward, those packed keys would be cleared as long as retain_graph=False.
653
+ # Clearing the packed key clears the corresponding entry in the WKD.
654
+ #
655
+ # If we wish recomputed variables to be immediately cleared as we unpack them in
656
+ # the retain_graph=True case, we cannot rely on the packed keys to be cleared by
657
+ # backward automatically. Instead of packing the strong reference to the key
658
+ # directly, we pack a container object, which we manually clear as we unpack.
659
+ #
660
+ # An important detail is that if a second backward happens, the second
661
+ # recomputation needs to reset the container with a newly created key.
662
+ #
663
+ # Rule 5. Stop recomputation as soon as we've recomputed the saved tensors we
664
+ # know we need.
665
+ #
666
+ # [ Implementation details of Rule 5 ]
667
+ #
668
+ # During recomputation, raise an exception if the number of recomputed tensors
669
+ # matches the number of tensors that we expected to recompute. We wrap the
670
+ # recomputation call with a try-catch to catch this specific exception. See
671
+ # Rule #6 below for some examples.
672
+ #
673
+ # Rule 6. We support doing backward inside checkpoint context
674
+ #
675
+ # [ retain_graph is True ]
676
+ #
677
+ # def fn(x):
678
+ # y = x.sin()
679
+ # z = y.cos()
680
+ # gx, = torch.autograd.grad(z, x, retain_graph=True)
681
+ # return gx, z
682
+ #
683
+ # out = checkpoint(fn)(inp)
684
+ # out.backward()
685
+ #
686
+ # Because z is saved by cos while checkpoint is enabled, it would not be
687
+ # actually saved, and so the .grad() call inside must trigger a recomputation.
688
+ #
689
+ # During recomputation the "inner pack hook" has two responsibilities:
690
+ #
691
+ # 1) As usual, populating the WeakKeyDictionary storing recomputed tensors
692
+ # 2) Pack the actual tensor (detached) so that one may perform backward on the
693
+ # recomputed graph. The tensors saved to this graph will live until the end
694
+ # of recomputation, or die earlier if someone performs backward with
695
+ # retain_graph=False.
696
+ #
697
+ # More generally performing backward on the recomputed graph occurs in the
698
+ # following cases:
699
+ # - If backward is performed inside forward,
700
+ # - During the original forward IF early-stop is disabled
701
+ # - During the original backward
702
+ # - If there are multiple .grad()/.backward() calls, we would perform backward
703
+ # on the recomputed graph even if early-stop is enabled (see the example below)
704
+ #
705
+ # [ retain_graph is False ]
706
+ #
707
+ # The example below shows what happens if during recomputation we find that some
708
+ # of the tensors we are trying to recompute have already been cleared.
709
+ #
710
+ # Spoiler: we don't do anything special, we just skip over them!
711
+ #
712
+ # def fn(x):
713
+ # y = x.sin() # (1)
714
+ # z = y.cos() # (2)
715
+ # gx, = torch.autograd.grad(z, x) # (3)
716
+ # return x.cos() * gx # (4)
717
+ #
718
+ # out = checkpoint(fn)(inp)
719
+ # out.backward() # (5)
720
+ #
721
+ # 1, 2. Don't save x and y since we are inside a checkpoint.
722
+ # 3. Trigger a recompute of fn since x and y weren't saved.
723
+ # And depending on whether early stop is enabled, either stop at (2) or
724
+ # continue running the function.
725
+ # Because we are running backward with retain_graph=False, we clear x and y's
726
+ # holders.
727
+ # 4. Don't save x since we are inside a checkpoint.
728
+ # 5. Calling backward triggers another recompute of fn. During recompute, we see
729
+ # that x and y have already been cleared in the original graph as indicated
730
+ # by holder=None. We skip over them. We still save x at (4) (since its holder
731
+ # is still alive.)
732
+
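+ # A minimal sketch of the nesting described above (assumed example; the
+ # function and tensor names are invented): the inner checkpoint's inputs are
+ # what the outer checkpoint saves, so unpacking a tensor saved inside `inner`
+ # first triggers recomputation of `outer`.
import torch

def _nested_checkpoint_sketch():
    def inner(x):
        return x.sin().cos()

    def outer(x):
        y = x * 2
        return checkpoint(inner, y, use_reentrant=False)

    x = torch.randn(8, requires_grad=True)
    out = checkpoint(outer, x, use_reentrant=False)
    out.sum().backward()
    return x.grad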
733
+ _enable_checkpoint_early_stop = True
734
+
735
+
736
+ @contextlib.contextmanager
737
+ def set_checkpoint_early_stop(enable: bool):
738
+ """Context manager that sets whether checkpoint should stop recomputation early.
739
+
740
+ By default, non-reentrant checkpoint stops recomputation as soon as it
741
+ has computed all needed Tensors. This context manager can be used to disable
742
+ that feature if it is problematic for your specific application.
743
+
744
+ This context manager only needs to be active when forward is run. It does
745
+ not need to be active during backward.
746
+
747
+ Example::
748
+
749
+ >>> # xdoctest: +SKIP(failing)
750
+ >>> message = "saved tensors default hooks are disabled"
751
+ >>> with set_checkpoint_early_stop(False):
752
+ ... # Any checkpoint under this context manager will respect this
753
+ ... # context manager, even if its backward is performed outside.
754
+ ... out = checkpoint(fn, inputs)
755
+ ...
756
+ >>> out.backward()
757
+ """
758
+ global _enable_checkpoint_early_stop
759
+ try:
760
+ prev = _enable_checkpoint_early_stop
761
+ _enable_checkpoint_early_stop = enable
762
+ yield
763
+ finally:
764
+ _enable_checkpoint_early_stop = prev
765
+
766
+
767
+ class _Handle:
768
+ pass
769
+
770
+
771
+ class _Holder:
772
+ def __init__(self):
773
+ self.handles: Dict[int, Optional[_Handle]] = dict()
774
+
775
+
776
+ class _NoopSaveInputs(torch.autograd.Function):
777
+ @staticmethod
778
+ def forward(*args):
779
+ return torch.empty((0,))
780
+
781
+ @staticmethod
782
+ def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
783
+ # Only tensors can be saved with ctx.save_for_backward, everything else
784
+ # is captured by get_args, which is saved directly on ctx
785
+ tensor_indices, tensors = zip(
786
+ *[(i, o) for i, o in enumerate(inputs) if isinstance(o, torch.Tensor)]
787
+ )
788
+ idx2saved_idx = {b: a for a, b in enumerate(tensor_indices)}
789
+ # args but with tensors replaced with None as placeholders
790
+ args = [None if isinstance(o, torch.Tensor) else o for o in inputs]
791
+
792
+ def get_args(saved_tensors):
793
+ # restore the placeholders with the original tensors grabbed from
794
+ # ctx.saved_tensors (which may be saved on a parent checkpoint if
795
+ # this checkpoint is nested, and that would trigger a recursive
796
+ # unpack!)
797
+ ret = [
798
+ saved_tensors[idx2saved_idx[i]] if i in tensor_indices else o
799
+ for i, o in enumerate(args)
800
+ ]
801
+ # grab the tail since we also saved the dummy to avoid having to explicitly
802
+ # handle the case where there are no tensor inputs
803
+ return ret[1:]
804
+
805
+ ctx.get_args = get_args
806
+ ctx.save_for_backward(*tensors)
807
+
808
+ @staticmethod
809
+ def backward(ctx, *grad_outputs):
810
+ raise AssertionError("Did not expect to backward on this graph")
811
+
812
+
813
+ class _CheckpointFrame:
814
+ def __init__(self, recompute_fn, early_stop, unpack_error_cb, metadata_fn):
815
+ self.recompute_fn = recompute_fn
816
+ self.input_saver = None
817
+ self.weak_holders: List[ReferenceType] = []
818
+ # We store this as a weakkeydictionary so that in the case of a partial
819
+ # backward, the entries in the dict are cleared alongside the Holder
820
+ # which will be removed when the SavedVariable is cleared.
821
+ self.recomputed: DefaultDict[
822
+ int, weakref.WeakKeyDictionary[_Handle, torch.Tensor]
823
+ ] = defaultdict(weakref.WeakKeyDictionary)
824
+ # We need both recomp_counter and recomputed since they can diverge
825
+ # https://github.com/pytorch/pytorch/pull/90105#discussion_r1135889885
826
+ self.recomp_counter: DefaultDict[int, int] = defaultdict(int)
827
+ self.is_recomputed: DefaultDict[int, bool] = defaultdict(bool)
828
+
829
+ # See Rule 5
830
+ self.early_stop = early_stop
831
+
832
+ # Debugging
833
+ self.metadata_fn = metadata_fn
834
+ self.unpack_error_cb = unpack_error_cb
835
+ self.x_metadatas = []
836
+ self.forward_completed = False
837
+ self.ignore_saved_mismatch = False
838
+
839
+ def check_recomputed_tensors_match(self, gid):
840
+ if self.ignore_saved_mismatch:
841
+ # TODO: we can probably make this check stricter by checking that
842
+ # the metadata of the first tensors still match.
843
+ return
844
+ # NOTE [ Error handling for checkpoint ]
845
+ #
846
+ # At a high level, we need to check that the tensors saved
847
+ # during original forward matches tensors saved during recompute
848
+ # This means handling 3 cases:
849
+ #
850
+ # 1. During recompute, more tensors were saved.
851
+ #
852
+ # Usually this is hidden due to the StopRecomputationError
853
+ # but if early stop is not enabled, we would have errored
854
+ # anyway because there aren't enough weak_holders. But we
855
+ # do want to have a nice error. See the _recomputation_hook
856
+ # for details.
857
+ if not len(self.weak_holders) == self.recomp_counter[gid]:
858
+ # 2. During recompute, fewer tensors were saved
859
+ #
860
+ # We know that every time we save something during the original forward
861
+ # we append to weak_holder, and every time we save a tensor
862
+ # during recompute we increment recompute_counter.
863
+ raise CheckpointError(
864
+ "torch.utils.checkpoint: A different number of tensors was saved "
865
+ "during the original forward and recomputation.\n"
866
+ f"Number of tensors saved during forward: {len(self.weak_holders)}\n"
867
+ f"Number of tensors saved during recomputation: {self.recomp_counter[gid]}"
868
+ )
869
+
870
+ # 3. During recompute, the same tensors were saved, but they
871
+ # have different metadata
872
+ nb_meta_different = []
873
+ for idx, weak_holder in enumerate(self.weak_holders):
874
+ holder = weak_holder()
875
+ if holder is None:
876
+ continue
877
+ # We've seen all holders since we iterate over them in order
878
+ # For every holder that is still alive now, it must've been
879
+ # alive when we saw it during recompute, therefore, the
880
+ # gid must be set.
881
+ _internal_assert(gid in holder.handles)
882
+ # We know this is the first unpack, so it couldn't have been set
883
+ # to None yet.
884
+ _internal_assert(holder.handles[gid] is not None)
885
+ # We always set these together in the recomputation hook
886
+ _internal_assert(holder.handles[gid] in self.recomputed[gid])
887
+ # see pack hook, x_metadata is 1:1 with weak_holders.
888
+ x_meta = self.x_metadatas[idx]
889
+ recomputed_x = self.recomputed[gid][holder.handles[gid]]
890
+ if x_meta != self.metadata_fn(recomputed_x):
891
+ nb_meta_different.append((idx, x_meta, self.metadata_fn(recomputed_x)))
892
+
893
+ if len(nb_meta_different) > 0:
894
+ mismatched_tensors = ""
895
+ for idx, x_meta, recomputed_meta in nb_meta_different:
896
+ mismatched_tensors += (
897
+ f"tensor at position {idx}:\n"
898
+ f"saved metadata: {x_meta}\n"
899
+ f"recomputed metadata: {recomputed_meta}\n"
900
+ )
901
+ raise CheckpointError(
902
+ "torch.utils.checkpoint: Recomputed values for the following tensors "
903
+ "have different metadata than during the forward pass.\n"
904
+ f"{mismatched_tensors}"
905
+ )
906
+
907
+
908
+ _checkpoint_error_template = """ \
909
+ An error happened while unpacking tensors; dumping logs of latest computation
910
+ because you passed `debug=True` to `torch.utils.checkpoint.checkpoint()`.
911
+ Scroll all the way down for guidance on how to navigate these logs.
912
+
913
+ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
914
+ | 1. Stack traces of the operators that ran in the original forward |
915
+ +------------------------------------------------------------------------------+
916
+
917
+ {forward_traces}
918
+ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
919
+ | 2. Stack traces of the operators that ran during recomputation |
920
+ +------------------------------------------------------------------------------+
921
+
922
+ {recompute_traces}
923
+ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
924
+ | 3. Log of operators in the original forward and recomputation |
925
+ +------------------------------------------------------------------------------+
926
+ (Scroll up to correlate stack traces with each operation listed below. This
927
+ helps identify their source in the code.)
928
+
929
+ IMPORTANT: Differences in "detach" calls between the original forward and the
930
+ recomputation are expected. They are introduced by the checkpointing
931
+ mechanism and can be ignored.
932
+
933
+ Operations executed during the original forward:
934
+
935
+ {forward_ops}
936
+
937
+ Operations executed during recomputation:
938
+
939
+ {recompute_ops}
940
+
941
+ +------------------------------------------------------------------------------+
942
+ ERROR: Detected non-determinism while running activation checkpointing
943
+
944
+ You are seeing this error because you passed `debug=True` to checkpoint and
945
+ the tensors saved during the original forward differ from those saved
946
+ during recomputation. This can happen if different operators were run in the
947
+ original forward and in the recomputation.
948
+
949
+ To identify where the mismatch may be coming from, you can do the following:
950
+
951
+ 1) Compare the operators ran during original forward and recomputation to
952
+ see where they differ. These operators are printed above in the order they
953
+ were executed.
954
+
955
+ 2) Review the stack trace for each operator to locate its invocation source.
956
+ Each operator's stack trace is printed in their execution order.
957
+
958
+ Note that the logs can be quite long. Here's how they are structured:
959
+ (Tip: you can Ctrl-f for these headers)
960
+
961
+ 1. Stack traces of the operators that ran in the original forward
962
+ 2. Stack traces of the operators that ran during recomputation
963
+ 3. Log of operators in the original forward and recomputation
964
+ 4. Error message <--- You are here
965
+ --------------------------------------------------------------------------------
966
+ """
967
+
968
+ class CheckpointError(RuntimeError):
969
+ pass
970
+
971
+
972
+ def _get_debug_context_and_cb() -> Tuple[Callable[[], Any], Callable[[CheckpointError], None]]:
973
+ # This function returns the context_fn and error_cb to be used by the
974
+ # checkpointing mechanism. error_cb is invoked when an error is detected
975
+ # during unpack.
976
+
977
+ # record_context_cpp is not supported on non-Linux, non-x86_64 platforms
978
+ cpp_tb = platform.machine() == 'x86_64' and platform.system() == 'Linux'
979
+
980
+ class CaptureLogs:
981
+ def __init__(self):
982
+ self.logs = None
983
+ self.tbs = None
984
+
985
+ def get_context_manager(self):
986
+ @contextlib.contextmanager
987
+ def logging_mode():
988
+ with LoggingTensorMode(), \
989
+ capture_logs(True, python_tb=True, script_tb=True, cpp_tb=cpp_tb) as logs_and_tb:
990
+ self.logs, self.tbs = logs_and_tb
991
+ yield logs_and_tb
992
+ return logging_mode()
993
+
994
+ capture_logs_fwd = CaptureLogs()
995
+ capture_logs_recompute = CaptureLogs()
996
+
997
+ def unpack_error_cb(e: CheckpointError):
998
+ def get_str_tb(label, capture_logs):
999
+ out = ""
1000
+ total_len = len(capture_logs.logs)
1001
+ for i, (log, tb) in enumerate(zip(capture_logs.logs, capture_logs.tbs)):
1002
+ out += f"{log} ({i + 1} of {total_len} in {label})\n\n"
1003
+ found_torch_dispatch = False
1004
+ for line in tb:
1005
+ # Start printing stack trace only after __torch_dispatch__ is found
1006
+ is_torch_dispatch = line['name'] == '__torch_dispatch__'
1007
+ if not found_torch_dispatch and not is_torch_dispatch:
1008
+ continue
1009
+ elif is_torch_dispatch:
1010
+ found_torch_dispatch = True
1011
+ continue
1012
+ out += f"{line['filename']}:{line['line']}:{line['name']}\n"
1013
+ out += "\n\n"
1014
+ return out
1015
+ assert capture_logs_fwd.logs is not None
1016
+ assert capture_logs_recompute.logs is not None
1017
+ raise CheckpointError(
1018
+ _checkpoint_error_template.format(
1019
+ forward_traces=get_str_tb("original", capture_logs_fwd),
1020
+ recompute_traces=get_str_tb("recompute", capture_logs_recompute),
1021
+ forward_ops="\n".join(capture_logs_fwd.logs),
1022
+ recompute_ops="\n".join(capture_logs_recompute.logs)
1023
+ )
1024
+ ) from e
1025
+
1026
+ def context_fn():
1027
+ return capture_logs_fwd.get_context_manager(), capture_logs_recompute.get_context_manager()
1028
+
1029
+ return context_fn, unpack_error_cb
1030
+
1031
+ def _default_meta_extractor(x: torch.Tensor) -> Dict[str, Any]:
1032
+ # These properties are fast to check, easy to understand
1033
+ return {
1034
+ "shape": x.shape,
1035
+ "dtype": x.dtype,
1036
+ "device": x.device
1037
+ }
1038
+
1039
+ _allowed_determinism_checks_to_fns: Dict[str, Callable[[torch.Tensor], Any]] = {
1040
+ _DEFAULT_DETERMINISM_MODE: _default_meta_extractor,
1041
+ "none": lambda _: None,
1042
+ }
1043
+
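+ # A small illustrative check (assumed example): the "default" determinism
+ # mode compares only these three cheap properties of each recomputed tensor
+ # against the metadata captured during the original forward.
import torch

def _determinism_metadata_sketch():
    t = torch.zeros(2, 3, dtype=torch.float16)
    # Same fields as _default_meta_extractor above: shape, dtype, device.
    return _default_meta_extractor(t) == {
        "shape": torch.Size([2, 3]),
        "dtype": torch.float16,
        "device": t.device,
    }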
1044
+ # See Rule 5
1045
+ class _StopRecomputationError(Exception):
1046
+ pass
1047
+
1048
+
1049
+ class _recomputation_hook(torch.autograd.graph.saved_tensors_hooks):
1050
+ def __init__(self, target_frame_ref: ReferenceType, gid: int):
1051
+ def pack_hook(x):
1052
+ target_frame = target_frame_ref()
1053
+ assert target_frame is not None # appease mypy
1054
+ recomp_idx = target_frame.recomp_counter[gid]
1055
+ target_frame.recomp_counter[gid] += 1
1056
+
1057
+ if recomp_idx >= len(target_frame.weak_holders):
1058
+ assert not target_frame.early_stop
1059
+ if not target_frame.forward_completed:
1060
+ # We run into this case when early stop is not enabled and do
1061
+ # grad within checkpoint.
1062
+ # We need to set this flag, so we don't error out later when
1063
+ # we check if the number of tensors saved during forward and
1064
+ # recomputation match.
1065
+ target_frame.ignore_saved_mismatch = True
1066
+ return x.detach()
1067
+ raise CheckpointError(
1068
+ "torch.utils.checkpoint: trying to save more tensors during "
1069
+ "recomputation than during the original forward pass."
1070
+ )
1071
+
1072
+ holder = target_frame.weak_holders[recomp_idx]()
1073
+
1074
+ # This holder may have been cleared because someone may have called
1075
+ # backward within forward. If so, we don't need to save.
1076
+ if holder is not None:
1077
+ _internal_assert(holder.handles.get(gid, None) is None)
1078
+ holder.handles[gid] = _Handle()
1079
+ target_frame.recomputed[gid][holder.handles[gid]] = x.detach()
1080
+
1081
+ if target_frame.early_stop and target_frame.recomp_counter[gid] == len(
1082
+ target_frame.weak_holders
1083
+ ):
1084
+ raise _StopRecomputationError()
1085
+ # See Rule 6: [ retain_graph is True ] above
1086
+ return x.detach()
1087
+
1088
+ def unpack_hook(x):
1089
+ # See Rule 6: [ retain_graph is True ] above for an example of when
1090
+ # the graph created during recomputation could be backwarded.
1091
+ return x
1092
+
1093
+ super().__init__(pack_hook, unpack_hook)
1094
+
1095
+
1096
+ class _checkpoint_hook(torch.autograd.graph.saved_tensors_hooks):
1097
+ def __init__(self, frame):
1098
+ def pack_hook(x):
1099
+ # See Rule 4 above
1100
+ holder = _Holder()
1101
+ frame.weak_holders.append(weakref.ref(holder))
1102
+ # Save metadata to detect non-determinism
1103
+ if frame.metadata_fn is not None:
1104
+ with torch.no_grad():
1105
+ frame.x_metadatas.append(frame.metadata_fn(x))
1106
+ return holder
1107
+
1108
+ def unpack_hook(holder):
1109
+ gid = torch._C._current_graph_task_id()
1110
+ if gid == -1:
1111
+ # generate a temporary id if we trigger unpack outside of a backward call
1112
+ gid = int(uuid.uuid4())
1113
+
1114
+ if not frame.is_recomputed[gid]:
1115
+ ctx = frame.input_saver.grad_fn
1116
+ args = ctx.get_args(ctx.saved_tensors)
1117
+
1118
+ try:
1119
+ with _recomputation_hook(
1120
+ weakref.ref(frame), gid
1121
+ ), torch.autograd.enable_grad():
1122
+ frame.recompute_fn(*args)
1123
+ except _StopRecomputationError:
1124
+ pass
1125
+ frame.is_recomputed[gid] = True
1126
+ frame.check_recomputed_tensors_match(gid)
1127
+
1128
+ _internal_assert(gid in holder.handles)
1129
+
1130
+ if holder.handles[gid] is None:
1131
+ raise CheckpointError(
1132
+ "torch.utils.checkpoint: Unpack is being triggered for a tensor that was already "
1133
+ "unpacked once. If you are calling ctx.saved_tensors in backward, make sure to do "
1134
+ "so only once. Otherwise please open an issue with details on your use case."
1135
+ )
1136
+ _internal_assert(holder.handles[gid] in frame.recomputed[gid])
1137
+ ret = frame.recomputed[gid][holder.handles[gid]]
1138
+ holder.handles[gid] = None
1139
+ return ret
1140
+
1141
+ if frame.unpack_error_cb is not None:
1142
+ def unpack_hook_with_error_cb(holder):
1143
+ try:
1144
+ return unpack_hook(holder)
1145
+ except CheckpointError as e:
1146
+ frame.unpack_error_cb(e)
1147
+ super().__init__(pack_hook, unpack_hook_with_error_cb)
1148
+ else:
1149
+ super().__init__(pack_hook, unpack_hook)
1150
+
1151
+
1152
+ def _is_compiling(func, args, kwargs):
1153
+ # Check if we are under AOTAutograd tracing
1154
+ # There should probably be a better way to do this...
1155
+ # TODO: unify _is_compiling across all compile stacks
1156
+ for arg in args:
1157
+ if isinstance(arg, torch.Tensor) and is_fun(arg):
1158
+ return True
1159
+ return False
1160
+
1161
+
1162
+ def _detach(x):
1163
+ if isinstance(x, torch.Tensor):
1164
+ return x.detach()
1165
+ return x
1166
+
1167
+
1168
+ uid = count(1)
1169
+
1170
+
1171
+ # NOTE: torch.utils.checkpoint internal logic will call these two functions an unknown number of times
1172
+ # (i.e. there could be _CachedTorchDispatchMode calls that don't map to a _CachingTorchDispatchMode call),
1173
+ # so we ignore these ops and just always recompute them.
1174
+ _ignored_ops = {
1175
+ torch.ops.prim.device.default,
1176
+ torch.ops.aten.detach.default,
1177
+ } | set(torch._subclasses.functional_tensor.FunctionalTensor.metadata_fns)
1178
+
1179
+
1180
+ class _CachingTorchDispatchMode(TorchDispatchMode):
1181
+ r"""
1182
+ A :class:`TorchDispatchMode` to implement selective activation checkpointing
1183
+ that's compatible with torch.compile. Used together with _CachedTorchDispatchMode.
1184
+ """
1185
+ def __init__(self, policy_fn, storage):
1186
+ self.policy_fn = policy_fn
1187
+ self.storage = storage
1188
+
1189
+ def push_into_storage(self, out, func, args, kwargs):
1190
+ out_detached = tree_map(_detach, out)
1191
+ self.storage[func].append(out_detached)
1192
+
1193
+ def _handle_compile_in_forward_ctx(self, should_not_recompute, func, args, kwargs):
1194
+ if func in _ignored_ops:
1195
+ return func(*args, **kwargs)
1196
+ if should_not_recompute:
1197
+ fx_traceback.current_meta["recompute"] = 0
1198
+ # NOTE: Here we just store and reuse output of all ops, since in torch.compile mode
1199
+ # we decide and handle recomputation in the partitioner.
1200
+ out = func(*args, **kwargs)
1201
+ self.push_into_storage(out, func, args, kwargs)
1202
+ return out
1203
+
1204
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
1205
+ if kwargs is None:
1206
+ kwargs = {}
1207
+ should_not_recompute = self.policy_fn("forward", func, *args, **kwargs)
1208
+ if _is_compiling(func, args, kwargs):
1209
+ return self._handle_compile_in_forward_ctx(should_not_recompute, func, args, kwargs)
1210
+ else:
1211
+ if should_not_recompute:
1212
+ out = func(*args, **kwargs)
1213
+ self.push_into_storage(out, func, args, kwargs)
1214
+ else:
1215
+ out = func(*args, **kwargs)
1216
+ return out
1217
+
1218
+
1219
+ class _CachedTorchDispatchMode(TorchDispatchMode):
1220
+ r"""
1221
+ A :class:`TorchDispatchMode` to implement selective activation checkpointing
1222
+ that's compatible with torch.compile. Used together with _CachingTorchDispatchMode.
1223
+ """
1224
+ def __init__(self, policy_fn, storage):
1225
+ self.policy_fn = policy_fn
1226
+ self.storage = storage
1227
+
1228
+ def pop_from_storage(self, func, args, kwargs):
1229
+ assert func in self.storage
1230
+ out = self.storage[func].pop(0)
1231
+ return out
1232
+
1233
+ def _handle_compile_in_recompute_ctx(self, should_not_recompute, func, args, kwargs):
1234
+ if func in _ignored_ops:
1235
+ return func(*args, **kwargs)
1236
+ out = self.pop_from_storage(func, args, kwargs)
1237
+ return out
1238
+
1239
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
1240
+ if kwargs is None:
1241
+ kwargs = {}
1242
+ should_not_recompute = self.policy_fn("recompute", func, *args, **kwargs)
1243
+ if _is_compiling(func, args, kwargs):
1244
+ return self._handle_compile_in_recompute_ctx(should_not_recompute, func, args, kwargs)
1245
+ else:
1246
+ if should_not_recompute:
1247
+ out = self.pop_from_storage(func, args, kwargs)
1248
+ else:
1249
+ out = func(*args, **kwargs)
1250
+ return out
1251
+
1252
+
1253
+ def _pt2_selective_checkpoint_context_fn_gen(policy_fn):
1254
+ """
1255
+ A helper function that generates a pair of contexts to be later passed into
1256
+ `torch.utils.checkpoint` API to implement selective checkpointing.
1257
+
1258
+ .. warning::
1259
+ This context_fn is intended for use with torch.compile only.
1260
+
1261
+ Args:
1262
+ policy_fn (Callable[[Callable, List[Any], Dict[str, Any]], bool]): Policy function
1263
+ to decide whether a particular op should be recomputed in backward pass or not.
1264
+ In eager mode:
1265
+ If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed.
1266
+ If policy_fn(...) returns False, the op is guaranteed to be recomputed.
1267
+ In torch.compile mode:
1268
+ If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed.
1269
+ If policy_fn(...) returns False, the op may or may not be recomputed
1270
+ (it's up to the partitioner to decide).
1271
+
1272
+ Returns:
1273
+ A pair of generated contexts.
1274
+
1275
+ Example:
1276
+ >>> # xdoctest: +REQUIRES(LINUX)
1277
+ >>>
1278
+ >>> def get_custom_policy():
1279
+ >>> no_recompute_list = [
1280
+ >>> torch.ops.aten.mm.default,
1281
+ >>> ]
1282
+ >>> def custom_policy(mode, func, *args, **kwargs):
1283
+ >>> return func in no_recompute_list
1284
+ >>> return custom_policy
1285
+ >>>
1286
+ >>> def selective_checkpointing_context_fn():
1287
+ >>> return _pt2_selective_checkpoint_context_fn_gen(get_custom_policy())
1288
+ >>>
1289
+ >>> def gn(x, y):
1290
+ >>> return torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y
1291
+ >>>
1292
+ >>> def fn(x, y):
1293
+ >>> return torch.utils.checkpoint.checkpoint(
1294
+ >>> gn, x, y,
1295
+ >>> use_reentrant=False,
1296
+ >>> context_fn=selective_checkpointing_context_fn,
1297
+ >>> )
1298
+ >>>
1299
+ >>> x = torch.randn(4, 4, requires_grad=True)
1300
+ >>> y = torch.randn(4, 4, requires_grad=True)
1301
+ >>>
1302
+ >>> compiled_fn = torch.compile(fn)
1303
+ """
1304
+ storage: Dict[Any, List[Any]] = defaultdict(list)
1305
+ return _CachingTorchDispatchMode(policy_fn, storage), _CachedTorchDispatchMode(policy_fn, storage)
1306
+
1307
+
1308
+ # NB: this helper wraps fn before calling checkpoint_impl. kwargs and
1309
+ # saving/restoring of global state is handled here.
1310
+
1311
+ def _checkpoint_without_reentrant_generator(
1312
+ fn,
1313
+ preserve_rng_state=True,
1314
+ context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn,
1315
+ determinism_check: str = _DEFAULT_DETERMINISM_MODE,
1316
+ debug: bool = False,
1317
+ *args,
1318
+ **kwargs
1319
+ ):
1320
+ """Checkpointing without reentrant autograd.
1321
+
1322
+ Args:
1323
+ function: describes what to run in the forward pass of the model or
1324
+ part of the model. It should also know how to handle the inputs
1325
+ passed as the tuple. For example, in LSTM, if user passes
1326
+ ``(activation, hidden)``, :attr:`function` should correctly use the
1327
+ first input as ``activation`` and the second input as ``hidden``
1328
+ preserve_rng_state(bool, optional): If ``False``, omit stashing and restoring
1329
+ the RNG state during each checkpoint.
1330
+ Default: ``True``
1331
+ context_fn(Callable, optional): A callable returning a tuple of two
1332
+ context managers. The function and its recomputation will be run
1333
+ under the first and second context managers respectively.
1334
+ determinism_check(str, optional): A string specifying the determinism
1335
+ check to perform. By default it is set to ``"default"`` which
1336
+ compares the shapes, dtypes, and devices of the recomputed tensors
1337
+ against those of the saved tensors. To turn off this check, specify
1338
+ ``"none"``. Currently these are the only two supported values.
1339
+ Please open an issue if you would like to see more determinism
1340
+ checks.
1341
+ debug(bool, optional): If ``True``, error messages will also include
1342
+ a trace of the operators ran during the original forward computation
1343
+ as well as the recomputation.
1344
+ *args: Arguments to pass in to the given ``function``.
1345
+ **kwargs: Keyword arguments to pass into the given ``function``.
1346
+ """
1347
+ unpack_error_cb = None
1348
+
1349
+ if _checkpoint_debug_enabled if _checkpoint_debug_enabled is not None else debug:
1350
+ if context_fn != noop_context_fn:
1351
+ raise ValueError(
1352
+ "debug=True is incompatible with non-default context_fn"
1353
+ )
1354
+ context_fn, unpack_error_cb = _get_debug_context_and_cb()
1355
+
1356
+ if determinism_check in _allowed_determinism_checks_to_fns:
1357
+ metadata_fn = _allowed_determinism_checks_to_fns[determinism_check]
1358
+ else:
1359
+ raise ValueError(
1360
+ f"determinism_check should be one of {list(_allowed_determinism_checks_to_fns.keys())}, "
1361
+ f"but got {determinism_check}"
1362
+ )
1363
+
1364
+ device = _infer_device_type(*args)
1365
+ device_module = _get_device_module(device)
1366
+ forward_context, recompute_context = context_fn()
1367
+ if _is_compiling(fn, args, kwargs) and context_fn != noop_context_fn:
1368
+ assert (
1369
+ isinstance(forward_context, TorchDispatchMode) and
1370
+ isinstance(recompute_context, TorchDispatchMode)
1371
+ ), \
1372
+ "In torch.compile mode, `context_fn` arg passed to `torch.utils.checkpoint` " + \
1373
+ "must generate a tuple of two `TorchDispatchMode`s."
1374
+ # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
1375
+ device_autocast_kwargs, cpu_autocast_kwargs = _get_autocast_kwargs(device=device)
1376
+
1377
+ if preserve_rng_state:
1378
+ fwd_cpu_state = torch.get_rng_state()
1379
+ # Don't eagerly initialize the cuda context by accident.
1380
+ # (If the user intends that the context is initialized later, within their
1381
+ # run_function, we SHOULD actually stash the cuda state here. Unfortunately,
1382
+ # we have no way to anticipate this will happen before we run the function.
1383
+ # If they do so, we raise an error.)
1384
+ had_device_in_fwd = False
1385
+ if getattr(device_module, "_initialized", False):
1386
+ had_device_in_fwd = True
1387
+ fwd_devices, fwd_device_states = get_device_states(*args)
1388
+
1389
+ def recompute_fn(*inputs):
1390
+ kwargs, *args = inputs
1391
+ # This will be called later during recomputation. This wrapping enables
1392
+ # the necessary global state to be captured.
1393
+ rng_devices = []
1394
+ if preserve_rng_state and had_device_in_fwd:
1395
+ rng_devices = fwd_devices
1396
+ with torch.random.fork_rng(
1397
+ devices=rng_devices, enabled=preserve_rng_state, device_type=device
1398
+ ):
1399
+ if preserve_rng_state:
1400
+ torch.set_rng_state(fwd_cpu_state)
1401
+ if had_device_in_fwd:
1402
+ set_device_states(fwd_devices, fwd_device_states)
1403
+
1404
+ device_autocast_ctx = device_module.amp.autocast(
1405
+ **device_autocast_kwargs
1406
+ ) if _supports_autocast(device) else contextlib.nullcontext()
1407
+ with device_autocast_ctx, torch.cpu.amp.autocast(**cpu_autocast_kwargs), \
1408
+ recompute_context:
1409
+ fn(*args, **kwargs)
1410
+
1411
+ new_frame = _CheckpointFrame(
1412
+ recompute_fn,
1413
+ _enable_checkpoint_early_stop,
1414
+ unpack_error_cb,
1415
+ metadata_fn
1416
+ )
1417
+ dummy = torch.empty((0,), requires_grad=True)
1418
+ new_frame.input_saver = _NoopSaveInputs.apply(dummy, kwargs, *args)
1419
+
1420
+ # When ambient grad_mode is False
1421
+ if new_frame.input_saver.grad_fn is None:
1422
+ yield
1423
+ return
1424
+
1425
+ with _checkpoint_hook(new_frame), forward_context:
1426
+ yield
1427
+ new_frame.forward_completed = True
1428
+
1429
+ if getattr(device_module, "_initialized", False) and \
1430
+ preserve_rng_state and not had_device_in_fwd: # type: ignore[possibly-undefined]
1431
+ # Device was not initialized before running the forward, so we didn't
1432
+ # stash the device state.
1433
+ raise RuntimeError(
1434
+ "PyTorch's device state was initialized in the forward pass "
1435
+ "of a Checkpoint, which is not allowed. Please open an issue "
1436
+ "if you need this feature."
1437
+ )
1438
+
1439
+ return
llmeval-env/lib/python3.10/site-packages/torch/utils/collect_env.py ADDED
@@ -0,0 +1,624 @@
1
+
2
+ # Unlike the rest of the PyTorch this file must be python2 compliant.
3
+ # This script outputs relevant system environment info
4
+ # Run it with `python collect_env.py` or `python -m torch.utils.collect_env`
5
+ import datetime
6
+ import locale
7
+ import re
8
+ import subprocess
9
+ import sys
10
+ import os
11
+ from collections import namedtuple
12
+
13
+
14
+ try:
15
+ import torch
16
+ TORCH_AVAILABLE = True
17
+ except (ImportError, NameError, AttributeError, OSError):
18
+ TORCH_AVAILABLE = False
19
+
20
+ # System Environment Information
21
+ SystemEnv = namedtuple('SystemEnv', [
22
+ 'torch_version',
23
+ 'is_debug_build',
24
+ 'cuda_compiled_version',
25
+ 'gcc_version',
26
+ 'clang_version',
27
+ 'cmake_version',
28
+ 'os',
29
+ 'libc_version',
30
+ 'python_version',
31
+ 'python_platform',
32
+ 'is_cuda_available',
33
+ 'cuda_runtime_version',
34
+ 'cuda_module_loading',
35
+ 'nvidia_driver_version',
36
+ 'nvidia_gpu_models',
37
+ 'cudnn_version',
38
+ 'pip_version', # 'pip' or 'pip3'
39
+ 'pip_packages',
40
+ 'conda_packages',
41
+ 'hip_compiled_version',
42
+ 'hip_runtime_version',
43
+ 'miopen_runtime_version',
44
+ 'caching_allocator_config',
45
+ 'is_xnnpack_available',
46
+ 'cpu_info',
47
+ ])
48
+
49
+ DEFAULT_CONDA_PATTERNS = {
50
+ "torch",
51
+ "numpy",
52
+ "cudatoolkit",
53
+ "soumith",
54
+ "mkl",
55
+ "magma",
56
+ "triton",
57
+ "optree",
58
+ }
59
+
60
+ DEFAULT_PIP_PATTERNS = {
61
+ "torch",
62
+ "numpy",
63
+ "mypy",
64
+ "flake8",
65
+ "triton",
66
+ "optree",
67
+ "onnx",
68
+ }
69
+
70
+
71
+ def run(command):
72
+ """Return (return-code, stdout, stderr)."""
73
+ shell = True if type(command) is str else False
74
+ p = subprocess.Popen(command, stdout=subprocess.PIPE,
75
+ stderr=subprocess.PIPE, shell=shell)
76
+ raw_output, raw_err = p.communicate()
77
+ rc = p.returncode
78
+ if get_platform() == 'win32':
79
+ enc = 'oem'
80
+ else:
81
+ enc = locale.getpreferredencoding()
82
+ output = raw_output.decode(enc)
83
+ err = raw_err.decode(enc)
84
+ return rc, output.strip(), err.strip()
85
+
86
+
87
+ def run_and_read_all(run_lambda, command):
88
+ """Run command using run_lambda; reads and returns entire output if rc is 0."""
89
+ rc, out, _ = run_lambda(command)
90
+ if rc != 0:
91
+ return None
92
+ return out
93
+
94
+
95
+ def run_and_parse_first_match(run_lambda, command, regex):
96
+ """Run command using run_lambda, returns the first regex match if it exists."""
97
+ rc, out, _ = run_lambda(command)
98
+ if rc != 0:
99
+ return None
100
+ match = re.search(regex, out)
101
+ if match is None:
102
+ return None
103
+ return match.group(1)
104
+
105
+ def run_and_return_first_line(run_lambda, command):
106
+ """Run command using run_lambda and returns first line if output is not empty."""
107
+ rc, out, _ = run_lambda(command)
108
+ if rc != 0:
109
+ return None
110
+ return out.split('\n')[0]
111
+
112
+
113
+ def get_conda_packages(run_lambda, patterns=None):
114
+ if patterns is None:
115
+ patterns = DEFAULT_CONDA_PATTERNS
116
+ conda = os.environ.get('CONDA_EXE', 'conda')
117
+ out = run_and_read_all(run_lambda, "{} list".format(conda))
118
+ if out is None:
119
+ return out
120
+
121
+ return "\n".join(
122
+ line
123
+ for line in out.splitlines()
124
+ if not line.startswith("#")
125
+ and any(name in line for name in patterns)
126
+ )
127
+
128
+ def get_gcc_version(run_lambda):
129
+ return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')
130
+
131
+ def get_clang_version(run_lambda):
132
+ return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)')
133
+
134
+
135
+ def get_cmake_version(run_lambda):
136
+ return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')
137
+
138
+
139
+ def get_nvidia_driver_version(run_lambda):
140
+ if get_platform() == 'darwin':
141
+ cmd = 'kextstat | grep -i cuda'
142
+ return run_and_parse_first_match(run_lambda, cmd,
143
+ r'com[.]nvidia[.]CUDA [(](.*?)[)]')
144
+ smi = get_nvidia_smi()
145
+ return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')
146
+
147
+
148
+ def get_gpu_info(run_lambda):
149
+ if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
150
+ if TORCH_AVAILABLE and torch.cuda.is_available():
151
+ if torch.version.hip is not None:
152
+ prop = torch.cuda.get_device_properties(0)
153
+ if hasattr(prop, "gcnArchName"):
154
+ gcnArch = " ({})".format(prop.gcnArchName)
155
+ else:
156
+ gcnArch = "NoGCNArchNameOnOldPyTorch"
157
+ else:
158
+ gcnArch = ""
159
+ return torch.cuda.get_device_name(None) + gcnArch
160
+ return None
161
+ smi = get_nvidia_smi()
162
+ uuid_regex = re.compile(r' \(UUID: .+?\)')
163
+ rc, out, _ = run_lambda(smi + ' -L')
164
+ if rc != 0:
165
+ return None
166
+ # Anonymize GPUs by removing their UUID
167
+ return re.sub(uuid_regex, '', out)
168
+
169
+
170
+ def get_running_cuda_version(run_lambda):
171
+ return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')
172
+
173
+
174
+ def get_cudnn_version(run_lambda):
175
+ """Return a list of libcudnn.so; it's hard to tell which one is being used."""
176
+ if get_platform() == 'win32':
177
+ system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
178
+ cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
179
+ where_cmd = os.path.join(system_root, 'System32', 'where')
180
+ cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
181
+ elif get_platform() == 'darwin':
182
+ # CUDA libraries and drivers can be found in /usr/local/cuda/. See
183
+ # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
184
+ # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
185
+ # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
186
+ cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
187
+ else:
188
+ cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
189
+ rc, out, _ = run_lambda(cudnn_cmd)
190
+ # find will return 1 if there are permission errors or if not found
191
+ if len(out) == 0 or (rc != 1 and rc != 0):
192
+ l = os.environ.get('CUDNN_LIBRARY')
193
+ if l is not None and os.path.isfile(l):
194
+ return os.path.realpath(l)
195
+ return None
196
+ files_set = set()
197
+ for fn in out.split('\n'):
198
+ fn = os.path.realpath(fn) # eliminate symbolic links
199
+ if os.path.isfile(fn):
200
+ files_set.add(fn)
201
+ if not files_set:
202
+ return None
203
+ # Alphabetize the result because the order is non-deterministic otherwise
204
+ files = sorted(files_set)
205
+ if len(files) == 1:
206
+ return files[0]
207
+ result = '\n'.join(files)
208
+ return 'Probably one of the following:\n{}'.format(result)
209
+
210
+
211
+ def get_nvidia_smi():
212
+ # Note: nvidia-smi is currently available only on Windows and Linux
213
+ smi = 'nvidia-smi'
214
+ if get_platform() == 'win32':
215
+ system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
216
+ program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
217
+ legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
218
+ new_path = os.path.join(system_root, 'System32', smi)
219
+ smis = [new_path, legacy_path]
220
+ for candidate_smi in smis:
221
+ if os.path.exists(candidate_smi):
222
+ smi = '"{}"'.format(candidate_smi)
223
+ break
224
+ return smi
225
+
226
+
227
+ # example outputs of CPU infos
228
+ # * linux
229
+ # Architecture: x86_64
230
+ # CPU op-mode(s): 32-bit, 64-bit
231
+ # Address sizes: 46 bits physical, 48 bits virtual
232
+ # Byte Order: Little Endian
233
+ # CPU(s): 128
234
+ # On-line CPU(s) list: 0-127
235
+ # Vendor ID: GenuineIntel
236
+ # Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
237
+ # CPU family: 6
238
+ # Model: 106
239
+ # Thread(s) per core: 2
240
+ # Core(s) per socket: 32
241
+ # Socket(s): 2
242
+ # Stepping: 6
243
+ # BogoMIPS: 5799.78
244
+ # Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
245
+ # sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
246
+ # xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
247
+ # pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
248
+ # hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
249
+ # fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
250
+ # avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
251
+ # xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
252
+ # avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
253
+ # Virtualization features:
254
+ # Hypervisor vendor: KVM
255
+ # Virtualization type: full
256
+ # Caches (sum of all):
257
+ # L1d: 3 MiB (64 instances)
258
+ # L1i: 2 MiB (64 instances)
259
+ # L2: 80 MiB (64 instances)
260
+ # L3: 108 MiB (2 instances)
261
+ # NUMA:
262
+ # NUMA node(s): 2
263
+ # NUMA node0 CPU(s): 0-31,64-95
264
+ # NUMA node1 CPU(s): 32-63,96-127
265
+ # Vulnerabilities:
266
+ # Itlb multihit: Not affected
267
+ # L1tf: Not affected
268
+ # Mds: Not affected
269
+ # Meltdown: Not affected
270
+ # Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
271
+ # Retbleed: Not affected
272
+ # Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
273
+ # Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
274
+ # Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
275
+ # Srbds: Not affected
276
+ # Tsx async abort: Not affected
277
+ # * win32
278
+ # Architecture=9
279
+ # CurrentClockSpeed=2900
280
+ # DeviceID=CPU0
281
+ # Family=179
282
+ # L2CacheSize=40960
283
+ # L2CacheSpeed=
284
+ # Manufacturer=GenuineIntel
285
+ # MaxClockSpeed=2900
286
+ # Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
287
+ # ProcessorType=3
288
+ # Revision=27142
289
+ #
290
+ # Architecture=9
291
+ # CurrentClockSpeed=2900
292
+ # DeviceID=CPU1
293
+ # Family=179
294
+ # L2CacheSize=40960
295
+ # L2CacheSpeed=
296
+ # Manufacturer=GenuineIntel
297
+ # MaxClockSpeed=2900
298
+ # Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
299
+ # ProcessorType=3
300
+ # Revision=27142
301
+
302
+ def get_cpu_info(run_lambda):
303
+ rc, out, err = 0, '', ''
304
+ if get_platform() == 'linux':
305
+ rc, out, err = run_lambda('lscpu')
306
+ elif get_platform() == 'win32':
307
+ rc, out, err = run_lambda('wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \
308
+ CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE')
309
+ elif get_platform() == 'darwin':
310
+ rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
311
+ cpu_info = 'None'
312
+ if rc == 0:
313
+ cpu_info = out
314
+ else:
315
+ cpu_info = err
316
+ return cpu_info
317
+
318
+
319
+ def get_platform():
320
+ if sys.platform.startswith('linux'):
321
+ return 'linux'
322
+ elif sys.platform.startswith('win32'):
323
+ return 'win32'
324
+ elif sys.platform.startswith('cygwin'):
325
+ return 'cygwin'
326
+ elif sys.platform.startswith('darwin'):
327
+ return 'darwin'
328
+ else:
329
+ return sys.platform
330
+
331
+
332
+ def get_mac_version(run_lambda):
333
+ return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')
334
+
335
+
336
+ def get_windows_version(run_lambda):
337
+ system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
338
+ wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
339
+ findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
340
+ return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd))
341
+
342
+
343
+ def get_lsb_version(run_lambda):
344
+ return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')
345
+
346
+
347
+ def check_release_file(run_lambda):
348
+ return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
349
+ r'PRETTY_NAME="(.*)"')
350
+
351
+
352
+ def get_os(run_lambda):
353
+ from platform import machine
354
+ platform = get_platform()
355
+
356
+ if platform == 'win32' or platform == 'cygwin':
357
+ return get_windows_version(run_lambda)
358
+
359
+ if platform == 'darwin':
360
+ version = get_mac_version(run_lambda)
361
+ if version is None:
362
+ return None
363
+ return 'macOS {} ({})'.format(version, machine())
364
+
365
+ if platform == 'linux':
366
+ # Ubuntu/Debian based
367
+ desc = get_lsb_version(run_lambda)
368
+ if desc is not None:
369
+ return '{} ({})'.format(desc, machine())
370
+
371
+ # Try reading /etc/*-release
372
+ desc = check_release_file(run_lambda)
373
+ if desc is not None:
374
+ return '{} ({})'.format(desc, machine())
375
+
376
+ return '{} ({})'.format(platform, machine())
377
+
378
+ # Unknown platform
379
+ return platform
380
+
381
+
382
+ def get_python_platform():
383
+ import platform
384
+ return platform.platform()
385
+
386
+
387
+ def get_libc_version():
388
+ import platform
389
+ if get_platform() != 'linux':
390
+ return 'N/A'
391
+ return '-'.join(platform.libc_ver())
392
+
393
+
394
+ def get_pip_packages(run_lambda, patterns=None):
395
+ """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages."""
396
+ if patterns is None:
397
+ patterns = DEFAULT_PIP_PATTERNS
398
+
399
+ # People generally have `pip` as `pip` or `pip3`
400
+ # But here it is invoked as `python -mpip`
401
+ def run_with_pip(pip):
402
+ out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"])
403
+ return "\n".join(
404
+ line
405
+ for line in out.splitlines()
406
+ if any(name in line for name in patterns)
407
+ )
408
+
409
+ pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
410
+ out = run_with_pip([sys.executable, '-mpip'])
411
+
412
+ return pip_version, out
413
+
414
+
415
+ def get_cachingallocator_config():
416
+ ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
417
+ return ca_config
418
+
419
+
420
+ def get_cuda_module_loading_config():
421
+ if TORCH_AVAILABLE and torch.cuda.is_available():
422
+ torch.cuda.init()
423
+ config = os.environ.get('CUDA_MODULE_LOADING', '')
424
+ return config
425
+ else:
426
+ return "N/A"
427
+
428
+
429
+ def is_xnnpack_available():
430
+ if TORCH_AVAILABLE:
431
+ import torch.backends.xnnpack
432
+ return str(torch.backends.xnnpack.enabled) # type: ignore[attr-defined]
433
+ else:
434
+ return "N/A"
435
+
436
+ def get_env_info():
437
+ run_lambda = run
438
+ pip_version, pip_list_output = get_pip_packages(run_lambda)
439
+
440
+ if TORCH_AVAILABLE:
441
+ version_str = torch.__version__
442
+ debug_mode_str = str(torch.version.debug)
443
+ cuda_available_str = str(torch.cuda.is_available())
444
+ cuda_version_str = torch.version.cuda
445
+ if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
446
+ hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
447
+ else: # HIP version
448
+ def get_version_or_na(cfg, prefix):
449
+ _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s]
450
+ return _lst[0] if _lst else 'N/A'
451
+
452
+ cfg = torch._C._show_config().split('\n')
453
+ hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime')
454
+ miopen_runtime_version = get_version_or_na(cfg, 'MIOpen')
455
+ cuda_version_str = 'N/A'
456
+ hip_compiled_version = torch.version.hip
457
+ else:
458
+ version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
459
+ hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
460
+
461
+ sys_version = sys.version.replace("\n", " ")
462
+
463
+ conda_packages = get_conda_packages(run_lambda)
464
+
465
+ return SystemEnv(
466
+ torch_version=version_str,
467
+ is_debug_build=debug_mode_str,
468
+ python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
469
+ python_platform=get_python_platform(),
470
+ is_cuda_available=cuda_available_str,
471
+ cuda_compiled_version=cuda_version_str,
472
+ cuda_runtime_version=get_running_cuda_version(run_lambda),
473
+ cuda_module_loading=get_cuda_module_loading_config(),
474
+ nvidia_gpu_models=get_gpu_info(run_lambda),
475
+ nvidia_driver_version=get_nvidia_driver_version(run_lambda),
476
+ cudnn_version=get_cudnn_version(run_lambda),
477
+ hip_compiled_version=hip_compiled_version,
478
+ hip_runtime_version=hip_runtime_version,
479
+ miopen_runtime_version=miopen_runtime_version,
480
+ pip_version=pip_version,
481
+ pip_packages=pip_list_output,
482
+ conda_packages=conda_packages,
483
+ os=get_os(run_lambda),
484
+ libc_version=get_libc_version(),
485
+ gcc_version=get_gcc_version(run_lambda),
486
+ clang_version=get_clang_version(run_lambda),
487
+ cmake_version=get_cmake_version(run_lambda),
488
+ caching_allocator_config=get_cachingallocator_config(),
489
+ is_xnnpack_available=is_xnnpack_available(),
490
+ cpu_info=get_cpu_info(run_lambda),
491
+ )
492
+
493
+ env_info_fmt = """
494
+ PyTorch version: {torch_version}
495
+ Is debug build: {is_debug_build}
496
+ CUDA used to build PyTorch: {cuda_compiled_version}
497
+ ROCM used to build PyTorch: {hip_compiled_version}
498
+
499
+ OS: {os}
500
+ GCC version: {gcc_version}
501
+ Clang version: {clang_version}
502
+ CMake version: {cmake_version}
503
+ Libc version: {libc_version}
504
+
505
+ Python version: {python_version}
506
+ Python platform: {python_platform}
507
+ Is CUDA available: {is_cuda_available}
508
+ CUDA runtime version: {cuda_runtime_version}
509
+ CUDA_MODULE_LOADING set to: {cuda_module_loading}
510
+ GPU models and configuration: {nvidia_gpu_models}
511
+ Nvidia driver version: {nvidia_driver_version}
512
+ cuDNN version: {cudnn_version}
513
+ HIP runtime version: {hip_runtime_version}
514
+ MIOpen runtime version: {miopen_runtime_version}
515
+ Is XNNPACK available: {is_xnnpack_available}
516
+
517
+ CPU:
518
+ {cpu_info}
519
+
520
+ Versions of relevant libraries:
521
+ {pip_packages}
522
+ {conda_packages}
523
+ """.strip()
524
+
525
+
526
+ def pretty_str(envinfo):
527
+ def replace_nones(dct, replacement='Could not collect'):
528
+ for key in dct.keys():
529
+ if dct[key] is not None:
530
+ continue
531
+ dct[key] = replacement
532
+ return dct
533
+
534
+ def replace_bools(dct, true='Yes', false='No'):
535
+ for key in dct.keys():
536
+ if dct[key] is True:
537
+ dct[key] = true
538
+ elif dct[key] is False:
539
+ dct[key] = false
540
+ return dct
541
+
542
+ def prepend(text, tag='[prepend]'):
543
+ lines = text.split('\n')
544
+ updated_lines = [tag + line for line in lines]
545
+ return '\n'.join(updated_lines)
546
+
547
+ def replace_if_empty(text, replacement='No relevant packages'):
548
+ if text is not None and len(text) == 0:
549
+ return replacement
550
+ return text
551
+
552
+ def maybe_start_on_next_line(string):
553
+ # If `string` is multiline, prepend a \n to it.
554
+ if string is not None and len(string.split('\n')) > 1:
555
+ return '\n{}\n'.format(string)
556
+ return string
557
+
558
+ mutable_dict = envinfo._asdict()
559
+
560
+ # If nvidia_gpu_models is multiline, start on the next line
561
+ mutable_dict['nvidia_gpu_models'] = \
562
+ maybe_start_on_next_line(envinfo.nvidia_gpu_models)
563
+
564
+ # If the machine doesn't have CUDA, report some fields as 'No CUDA'
565
+ dynamic_cuda_fields = [
566
+ 'cuda_runtime_version',
567
+ 'nvidia_gpu_models',
568
+ 'nvidia_driver_version',
569
+ ]
570
+ all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
571
+ all_dynamic_cuda_fields_missing = all(
572
+ mutable_dict[field] is None for field in dynamic_cuda_fields)
573
+ if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
574
+ for field in all_cuda_fields:
575
+ mutable_dict[field] = 'No CUDA'
576
+ if envinfo.cuda_compiled_version is None:
577
+ mutable_dict['cuda_compiled_version'] = 'None'
578
+
579
+ # Replace True with Yes, False with No
580
+ mutable_dict = replace_bools(mutable_dict)
581
+
582
+ # Replace all None objects with 'Could not collect'
583
+ mutable_dict = replace_nones(mutable_dict)
584
+
585
+ # If either of these are '', replace with 'No relevant packages'
586
+ mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
587
+ mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])
588
+
589
+ # Tag conda and pip packages with a prefix
590
+ # If they were previously None, they'll show up as ie '[conda] Could not collect'
591
+ if mutable_dict['pip_packages']:
592
+ mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
593
+ '[{}] '.format(envinfo.pip_version))
594
+ if mutable_dict['conda_packages']:
595
+ mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
596
+ '[conda] ')
597
+ mutable_dict['cpu_info'] = envinfo.cpu_info
598
+ return env_info_fmt.format(**mutable_dict)
599
+
600
+
601
+ def get_pretty_env_info():
602
+ return pretty_str(get_env_info())
603
+
604
+
605
+ def main():
606
+ print("Collecting environment information...")
607
+ output = get_pretty_env_info()
608
+ print(output)
609
+
610
+ if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'):
611
+ minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
612
+ if sys.platform == "linux" and os.path.exists(minidump_dir):
613
+ dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
614
+ latest = max(dumps, key=os.path.getctime)
615
+ ctime = os.path.getctime(latest)
616
+ creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
617
+ msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
618
+ "if this is related to your bug please include it when you file a report ***"
619
+ print(msg, file=sys.stderr)
620
+
621
+
622
+
623
+ if __name__ == '__main__':
624
+ main()
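For reference, a minimal sketch of exercising this module directly (assuming it is importable as torch.utils.collect_env, as the path above indicates); running python -m torch.utils.collect_env from a shell should print the same report:

from torch.utils.collect_env import get_env_info, get_pretty_env_info

print(get_pretty_env_info())      # formatted report, same text main() prints
env = get_env_info()              # SystemEnv namedtuple with the raw fields
print(env.torch_version, env.os, env.cuda_runtime_version)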
llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py ADDED
@@ -0,0 +1,11 @@
1
+ from torch._C import _get_cpp_backtrace
2
+
3
+ def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str:
4
+ r"""
5
+ Return a string containing the C++ stack trace of the current thread.
6
+
7
+ Args:
8
+ frames_to_skip (int): the number of frames to skip from the top of the stack
9
+ maximum_number_of_frames (int): the maximum number of frames to return
10
+ """
11
+ return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames)
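A minimal sketch of calling this helper; the frame counts below are arbitrary, and the quality of the trace depends on how the local libtorch was built:

from torch.utils.cpp_backtrace import get_cpp_backtrace

# Capture the native (C++) stack of the current thread, e.g. while debugging
# a hang that originates inside an operator.
print(get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=32))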
llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_extension.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__init__.py ADDED
@@ -0,0 +1,76 @@
1
+ # TODO(VitalyFedyunin): Rearranging these imports leads to a crash,
2
+ # need to clean up dependencies and fix it
3
+ from torch.utils.data.sampler import (
4
+ BatchSampler,
5
+ RandomSampler,
6
+ Sampler,
7
+ SequentialSampler,
8
+ SubsetRandomSampler,
9
+ WeightedRandomSampler,
10
+ )
11
+ from torch.utils.data.dataset import (
12
+ ChainDataset,
13
+ ConcatDataset,
14
+ Dataset,
15
+ IterableDataset,
16
+ StackDataset,
17
+ Subset,
18
+ TensorDataset,
19
+ random_split,
20
+ )
21
+ from torch.utils.data.datapipes.datapipe import (
22
+ DFIterDataPipe,
23
+ DataChunk,
24
+ IterDataPipe,
25
+ MapDataPipe,
26
+ )
27
+ from torch.utils.data.dataloader import (
28
+ DataLoader,
29
+ _DatasetKind,
30
+ get_worker_info,
31
+ default_collate,
32
+ default_convert,
33
+ )
34
+ from torch.utils.data.distributed import DistributedSampler
35
+ from torch.utils.data.datapipes._decorator import (
36
+ argument_validation,
37
+ functional_datapipe,
38
+ guaranteed_datapipes_determinism,
39
+ non_deterministic,
40
+ runtime_validation,
41
+ runtime_validation_disabled,
42
+ )
43
+
44
+ __all__ = ['BatchSampler',
45
+ 'ChainDataset',
46
+ 'ConcatDataset',
47
+ 'DFIterDataPipe',
48
+ 'DataChunk',
49
+ 'DataLoader',
50
+ 'Dataset',
51
+ 'DistributedSampler',
52
+ 'IterDataPipe',
53
+ 'IterableDataset',
54
+ 'MapDataPipe',
55
+ 'RandomSampler',
56
+ 'Sampler',
57
+ 'SequentialSampler',
58
+ 'StackDataset',
59
+ 'Subset',
60
+ 'SubsetRandomSampler',
61
+ 'TensorDataset',
62
+ 'WeightedRandomSampler',
63
+ '_DatasetKind',
64
+ 'argument_validation',
65
+ 'default_collate',
66
+ 'default_convert',
67
+ 'functional_datapipe',
68
+ 'get_worker_info',
69
+ 'guaranteed_datapipes_determinism',
70
+ 'non_deterministic',
71
+ 'random_split',
72
+ 'runtime_validation',
73
+ 'runtime_validation_disabled']
74
+
75
+ # Please keep this list sorted
76
+ assert __all__ == sorted(__all__)
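A short sketch using a few of the names re-exported here (the tensors, sizes and batch size are placeholders):

import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

features = torch.randn(10, 3)               # placeholder data
labels = torch.randint(0, 2, (10,))
dataset = TensorDataset(features, labels)

# Either pass shuffle=True and let DataLoader build a RandomSampler itself,
# or hand one in explicitly as done here.
loader = DataLoader(dataset, batch_size=4, sampler=RandomSampler(dataset))
for xb, yb in loader:
    print(xb.shape, yb.shape)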
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc ADDED
Binary file (481 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc ADDED
Binary file (28 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (5.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc ADDED
Binary file (4.61 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc ADDED
Binary file (4.85 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py ADDED
@@ -0,0 +1,5 @@
1
+ import warnings
2
+
3
+ def worker_init_fn(worker_id):
4
+ warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated"
5
+ " as DataLoader automatically applies sharding in every worker")
llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataloader.py ADDED
@@ -0,0 +1,1479 @@
1
+ r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter.
2
+
3
+ To support these two classes, in `./_utils` we define many utility methods and
4
+ functions to be run in multiprocessing. E.g., the data loading worker loop is
5
+ in `./_utils/worker.py`.
6
+ """
7
+
8
+ import functools
9
+ import itertools
10
+ import logging
11
+ import os
12
+ import queue
13
+ import threading
14
+ import warnings
15
+
16
+ from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union
17
+
18
+ import multiprocessing as python_multiprocessing
19
+ import torch
20
+ import torch.distributed as dist
21
+ import torch.multiprocessing as multiprocessing
22
+ import torch.utils.data.graph_settings
23
+
24
+ from torch._utils import ExceptionWrapper
25
+
26
+ from . import (
27
+ IterDataPipe,
28
+ MapDataPipe,
29
+ IterableDataset,
30
+ Sampler,
31
+ SequentialSampler,
32
+ RandomSampler,
33
+ BatchSampler,
34
+ Dataset,)
35
+
36
+ from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
37
+
38
+ from . import _utils
39
+
40
+ __all__ = [
41
+ "DataLoader",
42
+ "get_worker_info",
43
+ "default_collate",
44
+ "default_convert",
45
+ ]
46
+
47
+ T_co = TypeVar('T_co', covariant=True)
48
+ T = TypeVar('T')
49
+ _worker_init_fn_t = Callable[[int], None]
50
+
51
+ # Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
52
+ # type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
53
+ # See https://github.com/python/mypy/issues/3737.
54
+ _collate_fn_t = Callable[[List[T]], Any]
55
+
56
+
57
+ # These functions used to be defined in this file. However, it was moved to
58
+ # _utils/collate.py. Although it is rather hard to access this from user land
59
+ # (one has to explicitly directly `import torch.utils.data.dataloader`), there
60
+ # probably is user code out there using it. This aliasing maintains BC in this
61
+ # aspect.
62
+ default_collate: _collate_fn_t = _utils.collate.default_collate
63
+ default_convert = _utils.collate.default_convert
64
+
65
+ get_worker_info = _utils.worker.get_worker_info
66
+
67
+ logger = logging.getLogger(__name__)
68
+
69
+
70
+ class _DatasetKind:
71
+ Map = 0
72
+ Iterable = 1
73
+
74
+ @staticmethod
75
+ def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
76
+ if kind == _DatasetKind.Map:
77
+ return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
78
+ else:
79
+ return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
80
+
81
+
82
+ class _InfiniteConstantSampler(Sampler):
83
+ r"""Analogous to ``itertools.repeat(None, None)``.
84
+
85
+ Used as sampler for :class:`~torch.utils.data.IterableDataset`.
86
+ """
87
+
88
+ def __iter__(self):
89
+ while True:
90
+ yield None
91
+
92
+
93
+ def _get_distributed_settings():
94
+ if dist.is_available() and dist.is_initialized():
95
+ return dist.get_world_size(), dist.get_rank()
96
+ else:
97
+ return 1, 0
98
+
99
+
100
+ def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
101
+ global_worker_id = worker_id
102
+ info = torch.utils.data.get_worker_info()
103
+ assert info is not None
104
+ total_workers = info.num_workers
105
+ datapipe = info.dataset
106
+ assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
107
+ # To distribute elements across distributed process evenly, we should shard data on distributed
108
+ # processes first then shard on worker processes
109
+ total_workers *= world_size
110
+ global_worker_id = global_worker_id * world_size + rank_id
111
+ # For BC, use default SHARDING_PRIORITIES
112
+ torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
113
+ if worker_init_fn is not None:
114
+ worker_init_fn(worker_id)
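To make the interleaving above concrete, a tiny worked sketch of the global shard ids this scheme produces (the world size and worker count are made-up numbers):

world_size, num_workers = 2, 3                 # 2 ranks, 3 workers per rank
for rank_id in range(world_size):
    for worker_id in range(num_workers):
        total_workers = num_workers * world_size          # 6 shards overall
        global_worker_id = worker_id * world_size + rank_id
        print(f"rank {rank_id} worker {worker_id} -> shard {global_worker_id}/{total_workers}")
# rank 0 owns shards 0, 2, 4 and rank 1 owns shards 1, 3, 5, so each element
# of the datapipe is read by exactly one worker across the whole job.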
115
+
116
+
117
+ def _share_dist_seed(generator, pg):
118
+ _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator)
119
+ if isinstance(pg, dist.ProcessGroup):
120
+ dist.broadcast(_shared_seed, src=0, group=pg)
121
+ return _shared_seed.item()
122
+
123
+
124
+ class DataLoader(Generic[T_co]):
125
+ r"""
126
+ Data loader combines a dataset and a sampler, and provides an iterable over the given dataset.
127
+
128
+ The :class:`~torch.utils.data.DataLoader` supports both map-style and
129
+ iterable-style datasets with single- or multi-process loading, customizing
130
+ loading order and optional automatic batching (collation) and memory pinning.
131
+
132
+ See :py:mod:`torch.utils.data` documentation page for more details.
133
+
134
+ Args:
135
+ dataset (Dataset): dataset from which to load the data.
136
+ batch_size (int, optional): how many samples per batch to load
137
+ (default: ``1``).
138
+ shuffle (bool, optional): set to ``True`` to have the data reshuffled
139
+ at every epoch (default: ``False``).
140
+ sampler (Sampler or Iterable, optional): defines the strategy to draw
141
+ samples from the dataset. Can be any ``Iterable`` with ``__len__``
142
+ implemented. If specified, :attr:`shuffle` must not be specified.
143
+ batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
144
+ returns a batch of indices at a time. Mutually exclusive with
145
+ :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
146
+ and :attr:`drop_last`.
147
+ num_workers (int, optional): how many subprocesses to use for data
148
+ loading. ``0`` means that the data will be loaded in the main process.
149
+ (default: ``0``)
150
+ collate_fn (Callable, optional): merges a list of samples to form a
151
+ mini-batch of Tensor(s). Used when using batched loading from a
152
+ map-style dataset.
153
+ pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
154
+ into device/CUDA pinned memory before returning them. If your data elements
155
+ are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
156
+ see the example below.
157
+ drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
158
+ if the dataset size is not divisible by the batch size. If ``False`` and
159
+ the size of dataset is not divisible by the batch size, then the last batch
160
+ will be smaller. (default: ``False``)
161
+ timeout (numeric, optional): if positive, the timeout value for collecting a batch
162
+ from workers. Should always be non-negative. (default: ``0``)
163
+ worker_init_fn (Callable, optional): If not ``None``, this will be called on each
164
+ worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
165
+ input, after seeding and before data loading. (default: ``None``)
166
+ multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
167
+ ``None``, the default `multiprocessing context`_ of your operating system will
168
+ be used. (default: ``None``)
169
+ generator (torch.Generator, optional): If not ``None``, this RNG will be used
170
+ by RandomSampler to generate random indexes and multiprocessing to generate
171
+ ``base_seed`` for workers. (default: ``None``)
172
+ prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
173
+ in advance by each worker. ``2`` means there will be a total of
174
+ 2 * num_workers batches prefetched across all workers. (default value depends
175
+ on the set value for num_workers. If value of num_workers=0 default is ``None``.
176
+ Otherwise, if value of ``num_workers > 0`` default is ``2``).
177
+ persistent_workers (bool, optional): If ``True``, the data loader will not shut down
178
+ the worker processes after a dataset has been consumed once. This allows to
179
+ maintain the workers `Dataset` instances alive. (default: ``False``)
180
+ pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is
181
+ ``True``.
182
+
183
+
184
+ .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
185
+ cannot be an unpicklable object, e.g., a lambda function. See
186
+ :ref:`multiprocessing-best-practices` on more details related
187
+ to multiprocessing in PyTorch.
188
+
189
+ .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
190
+ When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
191
+ it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
192
+ rounding depending on :attr:`drop_last`, regardless of multi-process loading
193
+ configurations. This represents the best guess PyTorch can make because PyTorch
194
+ trusts user :attr:`dataset` code in correctly handling multi-process
195
+ loading to avoid duplicate data.
196
+
197
+ However, if sharding results in multiple workers having incomplete last batches,
198
+ this estimate can still be inaccurate, because (1) an otherwise complete batch can
199
+ be broken into multiple ones and (2) more than one batch worth of samples can be
200
+ dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
201
+ cases in general.
202
+
203
+ See `Dataset Types`_ for more details on these two types of datasets and how
204
+ :class:`~torch.utils.data.IterableDataset` interacts with
205
+ `Multi-process data loading`_.
206
+
207
+ .. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and
208
+ :ref:`data-loading-randomness` notes for random seed related questions.
209
+
210
+ .. _multiprocessing context:
211
+ https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
212
+ """
213
+
214
+ dataset: Dataset[T_co]
215
+ batch_size: Optional[int]
216
+ num_workers: int
217
+ pin_memory: bool
218
+ drop_last: bool
219
+ timeout: float
220
+ sampler: Union[Sampler, Iterable]
221
+ pin_memory_device: str
222
+ prefetch_factor: Optional[int]
223
+ _iterator : Optional['_BaseDataLoaderIter']
224
+ __initialized = False
225
+
226
+ def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
227
+ shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None,
228
+ batch_sampler: Union[Sampler[List], Iterable[List], None] = None,
229
+ num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None,
230
+ pin_memory: bool = False, drop_last: bool = False,
231
+ timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None,
232
+ multiprocessing_context=None, generator=None,
233
+ *, prefetch_factor: Optional[int] = None,
234
+ persistent_workers: bool = False,
235
+ pin_memory_device: str = ""):
236
+ torch._C._log_api_usage_once("python.data_loader")
237
+
238
+ if num_workers < 0:
239
+ raise ValueError('num_workers option should be non-negative; '
240
+ 'use num_workers=0 to disable multiprocessing.')
241
+
242
+ if timeout < 0:
243
+ raise ValueError('timeout option should be non-negative')
244
+
245
+ if num_workers == 0 and prefetch_factor is not None:
246
+ raise ValueError('prefetch_factor option could only be specified in multiprocessing.'
247
+ 'let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.')
248
+ elif num_workers > 0 and prefetch_factor is None:
249
+ prefetch_factor = 2
250
+ elif prefetch_factor is not None and prefetch_factor < 0:
251
+ raise ValueError('prefetch_factor option should be non-negative')
252
+
253
+ if persistent_workers and num_workers == 0:
254
+ raise ValueError('persistent_workers option needs num_workers > 0')
255
+
256
+ self.dataset = dataset
257
+ self.num_workers = num_workers
258
+ self.prefetch_factor = prefetch_factor
259
+ self.pin_memory = pin_memory
260
+ self.pin_memory_device = pin_memory_device
261
+ self.timeout = timeout
262
+ self.worker_init_fn = worker_init_fn
263
+ self.multiprocessing_context = multiprocessing_context
264
+
265
+ # Adds forward compatibilities so classic DataLoader can work with DataPipes:
266
+ # _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
267
+ if isinstance(self.dataset, IterDataPipe):
268
+ self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
269
+ elif isinstance(self.dataset, MapDataPipe):
270
+ self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
271
+
272
+ # Arg-check dataset related before checking samplers because we want to
273
+ # tell users that iterable-style datasets are incompatible with custom
274
+ # samplers first, so that they don't learn that this combo doesn't work
275
+ # after spending time fixing the custom sampler errors.
276
+ if isinstance(dataset, IterableDataset):
277
+ self._dataset_kind = _DatasetKind.Iterable
278
+ # NOTE [ Custom Samplers and IterableDataset ]
279
+ #
280
+ # `IterableDataset` does not support custom `batch_sampler` or
281
+ # `sampler` since the key is irrelevant (unless we support
282
+ # generator-style dataset one day...).
283
+ #
284
+ # For `sampler`, we always create a dummy sampler. This is an
285
+ # infinite sampler even when the dataset may have an implemented
286
+ # finite `__len__` because in multi-process data loading, naive
287
+ # settings will return duplicated data (which may be desired), and
288
+ # thus using a sampler with length matching that of dataset will
289
+ # cause data loss (you may have duplicates of the first couple
290
+ # batches, but never see anything afterwards). Therefore,
291
+ # `IterableDataset` always uses an infinite sampler, an instance of
292
+ # `_InfiniteConstantSampler` defined above.
293
+ #
294
+ # A custom `batch_sampler` essentially only controls the batch size.
295
+ # However, it is unclear how useful it would be since an iterable-style
296
+ # dataset can handle that within itself. Moreover, it is pointless
297
+ # in multi-process data loading as the assignment order of batches
298
+ # to workers is an implementation detail so users can not control
299
+ # how to batchify each worker's iterable. Thus, we disable this
300
+ # option. If this turns out to be useful in future, we can re-enable
301
+ # this, and support custom samplers that specify the assignments to
302
+ # specific workers.
303
+ if isinstance(dataset, IterDataPipe):
304
+ if shuffle is not None:
305
+ dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
306
+ # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
307
+ elif shuffle not in {False, None}:
308
+ raise ValueError(
309
+ f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}")
310
+
311
+ if sampler is not None:
312
+ # See NOTE [ Custom Samplers and IterableDataset ]
313
+ raise ValueError(
314
+ f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}")
315
+ elif batch_sampler is not None:
316
+ # See NOTE [ Custom Samplers and IterableDataset ]
317
+ raise ValueError(
318
+ "DataLoader with IterableDataset: expected unspecified "
319
+ f"batch_sampler option, but got batch_sampler={batch_sampler}")
320
+ else:
321
+ shuffle = bool(shuffle)
322
+ self._dataset_kind = _DatasetKind.Map
323
+
324
+
325
+
326
+ if sampler is not None and shuffle:
327
+ raise ValueError('sampler option is mutually exclusive with '
328
+ 'shuffle')
329
+
330
+ if batch_sampler is not None:
331
+ # auto_collation with custom batch_sampler
332
+ if batch_size != 1 or shuffle or sampler is not None or drop_last:
333
+ raise ValueError('batch_sampler option is mutually exclusive '
334
+ 'with batch_size, shuffle, sampler, and '
335
+ 'drop_last')
336
+ batch_size = None
337
+ drop_last = False
338
+ elif batch_size is None:
339
+ # no auto_collation
340
+ if drop_last:
341
+ raise ValueError('batch_size=None option disables auto-batching '
342
+ 'and is mutually exclusive with drop_last')
343
+
344
+ if sampler is None: # give default samplers
345
+ if self._dataset_kind == _DatasetKind.Iterable:
346
+ # See NOTE [ Custom Samplers and IterableDataset ]
347
+ sampler = _InfiniteConstantSampler()
348
+ else: # map-style
349
+ if shuffle:
350
+ sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type]
351
+ else:
352
+ sampler = SequentialSampler(dataset) # type: ignore[arg-type]
353
+
354
+ if batch_size is not None and batch_sampler is None:
355
+ # auto_collation without custom batch_sampler
356
+ batch_sampler = BatchSampler(sampler, batch_size, drop_last)
357
+
358
+ self.batch_size = batch_size
359
+ self.drop_last = drop_last
360
+ self.sampler = sampler
361
+ self.batch_sampler = batch_sampler
362
+ self.generator = generator
363
+
364
+ if collate_fn is None:
365
+ if self._auto_collation:
366
+ collate_fn = _utils.collate.default_collate
367
+ else:
368
+ collate_fn = _utils.collate.default_convert
369
+
370
+ self.collate_fn = collate_fn
371
+ self.persistent_workers = persistent_workers
372
+
373
+ self.__initialized = True
374
+ self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ]
375
+
376
+ self._iterator = None
377
+
378
+ self.check_worker_number_rationality()
379
+
380
+ torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined]
381
+
382
+ def _get_iterator(self) -> '_BaseDataLoaderIter':
383
+ if self.num_workers == 0:
384
+ return _SingleProcessDataLoaderIter(self)
385
+ else:
386
+ self.check_worker_number_rationality()
387
+ return _MultiProcessingDataLoaderIter(self)
388
+
389
+ @property
390
+ def multiprocessing_context(self):
391
+ return self.__multiprocessing_context
392
+
393
+ @multiprocessing_context.setter
394
+ def multiprocessing_context(self, multiprocessing_context):
395
+ if multiprocessing_context is not None:
396
+ if self.num_workers > 0:
397
+ if isinstance(multiprocessing_context, str):
398
+ valid_start_methods = multiprocessing.get_all_start_methods()
399
+ if multiprocessing_context not in valid_start_methods:
400
+ raise ValueError(
401
+ 'multiprocessing_context option '
402
+ f'should specify a valid start method in {valid_start_methods!r}, but got '
403
+ f'multiprocessing_context={multiprocessing_context!r}')
404
+ multiprocessing_context = multiprocessing.get_context(multiprocessing_context)
405
+
406
+ if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):
407
+ raise TypeError('multiprocessing_context option should be a valid context '
408
+ 'object or a string specifying the start method, but got '
409
+ f'multiprocessing_context={multiprocessing_context}')
410
+ else:
411
+ raise ValueError('multiprocessing_context can only be used with '
412
+ 'multi-process loading (num_workers > 0), but got '
413
+ f'num_workers={self.num_workers}')
414
+
415
+ self.__multiprocessing_context = multiprocessing_context
416
+
417
+ def __setattr__(self, attr, val):
418
+ if self.__initialized and attr in (
419
+ 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):
420
+ raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized')
421
+
422
+ super().__setattr__(attr, val)
423
+
424
+ # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
425
+ # since '_BaseDataLoaderIter' references 'DataLoader'.
426
+ def __iter__(self) -> '_BaseDataLoaderIter':
427
+ # When using a single worker the returned iterator should be
428
+ # created every time to avoid resetting its state.
429
+ # However, in the case of a multi-worker iterator
430
+ # the iterator is only created once in the lifetime of the
431
+ # DataLoader object so that workers can be reused.
432
+ if self.persistent_workers and self.num_workers > 0:
433
+ if self._iterator is None:
434
+ self._iterator = self._get_iterator()
435
+ else:
436
+ self._iterator._reset(self)
437
+ return self._iterator
438
+ else:
439
+ return self._get_iterator()
440
+
441
+ @property
442
+ def _auto_collation(self):
443
+ return self.batch_sampler is not None
444
+
445
+ @property
446
+ def _index_sampler(self):
447
+ # The actual sampler used for generating indices for `_DatasetFetcher`
448
+ # (see _utils/fetch.py) to read data at each time. This would be
449
+ # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.
450
+ # We can't change `.sampler` and `.batch_sampler` attributes for BC
451
+ # reasons.
452
+ if self._auto_collation:
453
+ return self.batch_sampler
454
+ else:
455
+ return self.sampler
456
+
457
+ def __len__(self) -> int:
458
+ if self._dataset_kind == _DatasetKind.Iterable:
459
+ # NOTE [ IterableDataset and __len__ ]
460
+ #
461
+ # For `IterableDataset`, `__len__` could be inaccurate when one naively
462
+ # does multi-processing data loading, since the samples will be duplicated.
463
+ # However, no real use case should be actually using that behavior, so
464
+ # it should count as a user error. We should generally trust user
465
+ # code to do the proper thing (e.g., configure each replica differently
466
+ # in `__iter__`), and give us the correct `__len__` if they choose to
467
+ # implement it (this will still throw if the dataset does not implement
468
+ # a `__len__`).
469
+ #
470
+ # To provide a further warning, we track if `__len__` was called on the
471
+ # `DataLoader`, save the returned value in `self._len_called`, and warn
472
+ # if the iterator ends up yielding more than this number of samples.
473
+
474
+ # Cannot statically verify that dataset is Sized
475
+ length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type]
476
+ if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler
477
+ from math import ceil
478
+ if self.drop_last:
479
+ length = length // self.batch_size
480
+ else:
481
+ length = ceil(length / self.batch_size)
482
+ return length
483
+ else:
484
+ return len(self._index_sampler)
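A worked example of the estimate above, assuming the IterableDataset reports len() == 10 and batch_size == 3:

from math import ceil

length, batch_size = 10, 3
print(length // batch_size)       # 3 batches when drop_last=True
print(ceil(length / batch_size))  # 4 batches when drop_last=False
# With multi-process loading the number of batches actually yielded can still
# differ; that mismatch is what the warning in __next__ reports.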
485
+
486
+ def check_worker_number_rationality(self):
487
+ # This function checks whether the dataloader's worker number is rational based on
488
+ # the current system's resources. The current rule is that if the number of workers this
489
+ # Dataloader will create is bigger than the number of logical cpus that it is allowed to
490
+ # use, then we will pop up a warning to let the user pay attention.
491
+ #
492
+ # e.g. If the current system has 2 physical CPUs with 16 cores each, and each core supports 2
493
+ # threads, then the total number of logical cpus here is 2 * 16 * 2 = 64. Let's say the current
494
+ # DataLoader process can use half of them, which is 32; then the rational max number of
495
+ # workers initiated from this process is 32.
496
+ # Now, let's say the created DataLoader has num_workers = 40, which is bigger than 32.
497
+ # So the warning message is triggered to notify the user to lower the worker number if
498
+ # necessary.
499
+ #
500
+ #
501
+ # [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is
502
+ # available (available on most Linux systems, but not on OSX and Windows).
503
+ # When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
504
+ # it doesn't respect cpuset.
505
+ # We don't take threading into account since each worker process is single threaded
506
+ # at this time.
507
+ #
508
+ # We don't set any threading flags (e.g. OMP_NUM_THREADS, MKL_NUM_THREADS, etc.)
509
+ # other than `torch.set_num_threads` to 1 in the worker process. If the passed-in
510
+ # functions use 3rd party modules that rely on those threading flags to determine
511
+ # how many threads to create (e.g. numpy, etc.), then it is the caller's responsibility to
512
+ # set those flags correctly.
513
+ def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
514
+
515
+ suggested_max_worker_msg = ((
516
+ "Our suggested max number of worker in current system is {}{}, which is smaller "
517
+ "than what this DataLoader is going to create.").format(
518
+ num_worker_suggest,
519
+ ("" if cpuset_checked else " (`cpuset` is not taken into account)"))
520
+ ) if num_worker_suggest is not None else (
521
+ "DataLoader is not able to compute a suggested max number of worker in current system.")
522
+
523
+ warn_msg = (
524
+ "This DataLoader will create {} worker processes in total. {} "
525
+ "Please be aware that excessive worker creation might get DataLoader running slow or even freeze, "
526
+ "lower the worker number to avoid potential slowness/freeze if necessary.").format(
527
+ num_worker_created,
528
+ suggested_max_worker_msg)
529
+ return warn_msg
530
+
531
+ if not self.num_workers or self.num_workers == 0:
532
+ return
533
+
534
+ # try to compute a suggested max number of worker based on system's resource
535
+ max_num_worker_suggest = None
536
+ cpuset_checked = False
537
+ if hasattr(os, 'sched_getaffinity'):
538
+ try:
539
+ max_num_worker_suggest = len(os.sched_getaffinity(0))
540
+ cpuset_checked = True
541
+ except Exception:
542
+ pass
543
+ if max_num_worker_suggest is None:
544
+ # os.cpu_count() could return Optional[int]
545
+ # get cpu count first and check None in order to satisfy mypy check
546
+ cpu_count = os.cpu_count()
547
+ if cpu_count is not None:
548
+ max_num_worker_suggest = cpu_count
549
+
550
+ if max_num_worker_suggest is None:
551
+ warnings.warn(_create_warning_msg(
552
+ max_num_worker_suggest,
553
+ self.num_workers,
554
+ cpuset_checked))
555
+ return
556
+
557
+ if self.num_workers > max_num_worker_suggest:
558
+ warnings.warn(_create_warning_msg(
559
+ max_num_worker_suggest,
560
+ self.num_workers,
561
+ cpuset_checked))
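The suggested maximum can be reproduced outside the DataLoader with the same two probes the method uses; a minimal sketch:

import os

def suggested_max_workers():
    # Prefer the cpuset-aware affinity mask where the platform provides it
    # (most Linux systems); fall back to the raw logical CPU count otherwise.
    if hasattr(os, "sched_getaffinity"):
        try:
            return len(os.sched_getaffinity(0))
        except Exception:
            pass
    return os.cpu_count() or 1

print(suggested_max_workers())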
562
+
563
+
564
+ class _BaseDataLoaderIter:
565
+ def __init__(self, loader: DataLoader) -> None:
566
+ self._dataset = loader.dataset
567
+ self._shared_seed = None
568
+ self._pg = None
569
+ if isinstance(self._dataset, IterDataPipe):
570
+ if dist.is_available() and dist.is_initialized():
571
+ self._pg = dist.new_group(backend="gloo")
572
+ self._shared_seed = _share_dist_seed(loader.generator, self._pg)
573
+ shared_rng = torch.Generator()
574
+ shared_rng.manual_seed(self._shared_seed)
575
+ self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
576
+ self._dataset_kind = loader._dataset_kind
577
+ self._IterableDataset_len_called = loader._IterableDataset_len_called
578
+ self._auto_collation = loader._auto_collation
579
+ self._drop_last = loader.drop_last
580
+ self._index_sampler = loader._index_sampler
581
+ self._num_workers = loader.num_workers
582
+ ws, rank = _get_distributed_settings()
583
+ self._world_size = ws
584
+ self._rank = rank
585
+ # For other backends, pin_memory_device needs to be set. If it is not set,
586
+ # the default behaviour is the CUDA device. If pin_memory_device is selected
587
+ # and pin_memory is not set, the default behaviour is False.
588
+ if (len(loader.pin_memory_device) == 0):
589
+ self._pin_memory = loader.pin_memory and torch.cuda.is_available()
590
+ self._pin_memory_device = None
591
+ else:
592
+ if not loader.pin_memory:
593
+ warn_msg = ("pin memory device is set and pin_memory flag is not used then device pinned memory won't be used"
594
+ "please set pin_memory to true, if you need to use the device pin memory")
595
+ warnings.warn(warn_msg)
596
+
597
+ self._pin_memory = loader.pin_memory
598
+ self._pin_memory_device = loader.pin_memory_device
599
+ self._timeout = loader.timeout
600
+ self._collate_fn = loader.collate_fn
601
+ self._sampler_iter = iter(self._index_sampler)
602
+ self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
603
+ self._persistent_workers = loader.persistent_workers
604
+ self._num_yielded = 0
605
+ self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__"
606
+
607
+ def __iter__(self) -> '_BaseDataLoaderIter':
608
+ return self
609
+
610
+ def _reset(self, loader, first_iter=False):
611
+ self._sampler_iter = iter(self._index_sampler)
612
+ self._num_yielded = 0
613
+ self._IterableDataset_len_called = loader._IterableDataset_len_called
614
+ if isinstance(self._dataset, IterDataPipe):
615
+ self._shared_seed = _share_dist_seed(loader.generator, self._pg)
616
+ shared_rng = torch.Generator()
617
+ shared_rng.manual_seed(self._shared_seed)
618
+ self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng)
619
+
620
+ def _next_index(self):
621
+ return next(self._sampler_iter) # may raise StopIteration
622
+
623
+ def _next_data(self):
624
+ raise NotImplementedError
625
+
626
+ def __next__(self) -> Any:
627
+ with torch.autograd.profiler.record_function(self._profile_name):
628
+ if self._sampler_iter is None:
629
+ # TODO(https://github.com/pytorch/pytorch/issues/76750)
630
+ self._reset() # type: ignore[call-arg]
631
+ data = self._next_data()
632
+ self._num_yielded += 1
633
+ if self._dataset_kind == _DatasetKind.Iterable and \
634
+ self._IterableDataset_len_called is not None and \
635
+ self._num_yielded > self._IterableDataset_len_called:
636
+ warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} "
637
+ "samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called,
638
+ self._num_yielded)
639
+ if self._num_workers > 0:
640
+ warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the "
641
+ "IterableDataset replica at each worker. Please see "
642
+ "https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.")
643
+ warnings.warn(warn_msg)
644
+ return data
645
+
646
+ def __len__(self) -> int:
647
+ return len(self._index_sampler)
648
+
649
+ def __getstate__(self):
650
+ # TODO: add limited pickling support for sharing an iterator
651
+ # across multiple threads for HOGWILD.
652
+ # Probably the best way to do this is by moving the sample pushing
653
+ # to a separate thread and then just sharing the data queue
654
+ # but signalling the end is tricky without a non-blocking API
655
+ raise NotImplementedError(f"{self.__class__.__name__} cannot be pickled")
656
+
657
+
658
+ class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
659
+ def __init__(self, loader):
660
+ super().__init__(loader)
661
+ assert self._timeout == 0
662
+ assert self._num_workers == 0
663
+
664
+ # Adds forward compatibilities so classic DataLoader can work with DataPipes:
665
+ # Taking care of distributed sharding
666
+ if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
667
+ # For BC, use default SHARDING_PRIORITIES
668
+ torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank)
669
+
670
+ self._dataset_fetcher = _DatasetKind.create_fetcher(
671
+ self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
672
+
673
+ def _next_data(self):
674
+ index = self._next_index() # may raise StopIteration
675
+ data = self._dataset_fetcher.fetch(index) # may raise StopIteration
676
+ if self._pin_memory:
677
+ data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
678
+ return data
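A small sketch of the pinning path above: with pin_memory=True and num_workers=0, each fetched batch is copied into page-locked memory before being returned (the shapes are placeholders, and pinning is silently skipped when CUDA is unavailable):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(8, 4))
loader = DataLoader(dataset, batch_size=4, num_workers=0, pin_memory=True)
(batch,) = next(iter(loader))
# is_pinned() is only meaningful when a CUDA device is present; otherwise the
# iterator disables pinning, as checked in _BaseDataLoaderIter.__init__.
print(batch.is_pinned() if torch.cuda.is_available() else "CUDA not available, batch left unpinned")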
679
+
680
+
681
+ class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
682
+ r"""Iterates once over the DataLoader's dataset, as specified by the sampler."""
683
+
684
+ # NOTE [ Data Loader Multiprocessing Shutdown Logic ]
685
+ #
686
+ # Preliminary:
687
+ #
688
+ # Our data model looks like this (queues are indicated with curly brackets):
689
+ #
690
+ # main process ||
691
+ # | ||
692
+ # {index_queue} ||
693
+ # | ||
694
+ # worker processes || DATA
695
+ # | ||
696
+ # {worker_result_queue} || FLOW
697
+ # | ||
698
+ # pin_memory_thread of main process || DIRECTION
699
+ # | ||
700
+ # {data_queue} ||
701
+ # | ||
702
+ # data output \/
703
+ #
704
+ # P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if
705
+ # `pin_memory=False`.
706
+ #
707
+ #
708
+ # Terminating multiprocessing logic requires very careful design. In
709
+ # particular, we need to make sure that
710
+ #
711
+ # 1. The iterator gracefully exits the workers when its last reference is
712
+ # gone or it is depleted.
713
+ #
714
+ # In this case, the workers should be gracefully exited because the
715
+ # main process may still need to continue to run, and we want cleaning
716
+ # up code in the workers to be executed (e.g., releasing GPU memory).
717
+ # Naturally, we implement the shutdown logic in `__del__` of
718
+ # DataLoaderIterator.
719
+ #
720
+ # We delay the discussion on the logic in this case until later.
721
+ #
722
+ # 2. The iterator exits the workers when the loader process and/or worker
723
+ # processes exits normally or with error.
724
+ #
725
+ # We set all workers and `pin_memory_thread` to have `daemon=True`.
726
+ #
727
+ # You may ask, why can't we make the workers non-daemonic, and
728
+ # gracefully exit using the same logic as we have in `__del__` when the
729
+ # iterator gets deleted (see 1 above)?
730
+ #
731
+ # First of all, `__del__` is **not** guaranteed to be called when
732
+ # interpreter exits. Even if it is called, by the time it executes,
733
+ # many Python core library resources may already be freed, and even
734
+ # simple things like acquiring an internal lock of a queue may hang.
735
+ # Therefore, in this case, we actually need to prevent `__del__` from
736
+ # being executed, and rely on the automatic termination of daemonic
737
+ # children.
738
+ #
739
+ # Thus, we register an `atexit` hook that sets a global flag
740
+ # `_utils.python_exit_status`. Since `atexit` hooks are executed in the
741
+ # reverse order of registration, we are guaranteed that this flag is
742
+ # set before library resources we use are freed (which, at least in
743
+ # CPython, is done via an `atexit` handler defined in
744
+ # `multiprocessing/util.py`
745
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
746
+ # registered when an object requiring this mechanism is first
747
+ # created, e.g., `mp.Queue`
748
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
749
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
750
+ # )
751
+ #
752
+ # So in `__del__`, we check if `_utils.python_exit_status` is set or
753
+ # `None` (freed), and perform no-op if so.
754
+ #
755
+ # However, simply letting library clean-up codes run can also be bad,
756
+ # because such codes (i.e., `multiprocessing.util._exit_function()`)
757
+ # include join putting threads for `mp.Queue`, which can be blocking.
758
+ # Hence, the main process putting threads are called with
759
+ # `cancel_join_thread` at creation. See later section
760
+ # [ 3b. A process won't hang when putting into a queue; ]
761
+ # for more details.
762
+ #
763
+ # Here are two example cases where library clean-up codes can run
764
+ # before `__del__` is called:
765
+ #
766
+ # 1. If we hold onto a reference to the iterator, it more often
767
+ # than not tries to do `multiprocessing` library cleaning before
768
+ # clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
769
+ # and thus prevents our cleaning-up code from running first.
770
+ #
771
+ # 2. A similar issue arises when a `DataLoader` is used in a subprocess.
772
+ # When a process ends, it shuts all its daemonic children
773
+ # down with a SIGTERM (instead of joining them without a timeout).
774
+ # Similarly for threads, but by a different mechanism. This fact,
775
+ # together with a few implementation details of multiprocessing, forces
776
+ # us to make workers daemonic. All of our problems arise when a
777
+ # DataLoader is used in a subprocess, and are caused by multiprocessing
778
+ # code which looks more or less like this:
779
+ #
780
+ # try:
781
+ # your_function_using_a_dataloader()
782
+ # finally:
783
+ # multiprocessing.util._exit_function()
784
+ #
785
+ # The joining/termination mentioned above happens inside
786
+ # `_exit_function()`. Now, if `your_function_using_a_dataloader()`
787
+ # throws, the stack trace stored in the exception will prevent the
788
+ # frame which uses `DataLoaderIter` to be freed. If the frame has any
789
+ # reference to the `DataLoaderIter` (e.g., in a method of the iter),
790
+ # its `__del__`, which starts the shutdown procedure, will not be
791
+ # called. That, in turn, means that workers aren't notified. Attempting
792
+ # to join in `_exit_function` will then result in a hang.
793
+ #
794
+ # For context, `_exit_function` is also registered as an `atexit` call.
795
+ # So it is unclear to me (@ssnl) why this is needed in a finally block.
796
+ # The code dates back to 2008 and there is no comment on the original
797
+ # PEP 371 or patch https://bugs.python.org/issue3050 (containing both
798
+ # the finally block and the `atexit` registration) that explains this.
799
+ #
800
+ #
801
+ # Finally, another choice is to just shut down the workers with the logic in 1
802
+ # above whenever we see an error in `next`. This isn't ideal because
803
+ # a. It prevents users from using try-catch to resume data loading.
804
+ # b. It doesn't prevent hanging if users have references to the
805
+ # iterator.
806
+ #
807
+ # 3. All processes exit if any of them die unexpectedly by fatal signals.
808
+ #
809
+ # As shown above, the workers are set as daemonic children of the main
810
+ # process. However, automatic cleaning-up of such child processes only
811
+ # happens if the parent process exits gracefully (e.g., not via fatal
812
+ # signals like SIGKILL). So we must ensure that each process will exit
813
+ # even if the process that should send/receive data to/from it was
814
+ # killed, i.e.,
815
+ #
816
+ # a. A process won't hang when getting from a queue.
817
+ #
818
+ # Even with carefully designed data dependencies (i.e., a `put()`
819
+ # always corresponding to a `get()`), hanging on `get()` can still
820
+ # happen when data in queue is corrupted (e.g., due to
821
+ # `cancel_join_thread` or unexpected exit).
822
+ #
823
+ # For child exit, we set a timeout whenever we try to get data
824
+ # from `data_queue`, and check the workers' status on each timeout
825
+ # and error.
826
+ # See `_DataLoaderiter._get_batch()` and
827
+ # `_DataLoaderiter._try_get_data()` for details.
828
+ #
829
+ # Additionally, for child exit on non-Windows platforms, we also
830
+ # register a SIGCHLD handler (which is not supported on Windows) on
831
+ # the main process, which checks if any of the workers fail in the
832
+ # (Python) handler. This is more efficient and faster in detecting
833
+ # worker failures, compared to only using the above mechanism.
834
+ # See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
835
+ #
836
+ # For `.get()` calls where the sender(s) is not the workers, we
837
+ # guard them with timeouts, and check the status of the sender
838
+ # when timeout happens:
839
+ # + in the workers, the `_utils.worker.ManagerWatchdog` class
840
+ # checks the status of the main process.
841
+ # + if `pin_memory=True`, when getting from `pin_memory_thread`,
842
+ # check `pin_memory_thread` status periodically until `.get()`
843
+ # returns or see that `pin_memory_thread` died.
844
+ #
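+ #           A generic sketch of this guarded-`get` pattern (illustrative only;
+ #           `guarded_get` and `peer` are hypothetical names, not the actual
+ #           helpers used by the DataLoader):
+ #
+ #               import queue
+ #
+ #               def guarded_get(q, peer, timeout):
+ #                   while True:
+ #                       try:
+ #                           return q.get(timeout=timeout)
+ #                       except queue.Empty:
+ #                           # Timed out: check the sender before retrying, so
+ #                           # we never block forever on a dead peer.
+ #                           if not peer.is_alive():
+ #                               raise RuntimeError("sender exited unexpectedly")
+ #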
845
+ # b. A process won't hang when putting into a queue;
846
+ #
847
+ # We use `mp.Queue` which has a separate background thread to put
848
+ # objects from an unbounded buffer array. The background thread is
849
+ # daemonic and usually automatically joined when the process
850
+ # *exits*.
851
+ #
852
+ # In case that the receiver has ended abruptly while
853
+ # reading from the pipe, the join will hang forever. The usual
854
+ # solution for this in Python is calling `q.cancel_join_thread`,
855
+ # which prevents automatically joining it when finalizing
856
+ # (exiting).
857
+ #
858
+ # Nonetheless, `cancel_join_thread` must only be called when the
859
+ # queue is **not** going to be read from or written into by another
860
+ # process, because it may hold onto a lock or leave corrupted data
861
+ # in the queue, leading other readers/writers to hang.
862
+ #
863
+ # Hence,
864
+ # + For worker processes, we only do so (for their output
865
+ # queues, i.e., `worker_result_queue`) before exiting.
866
+ # + For `pin_memory_thread`, its output queue `data_queue` is a
867
+ # `queue.Queue` that does blocking `put` if the queue is full.
868
+ # So there is no above problem, but as a result, in
869
+ # `_pin_memory_loop`, we do need to wrap the `put` in a loop
870
+ # that breaks not only upon success, but also when the main
871
+ # process stops reading, i.e., is shutting down.
872
+ # + For loader process, we `cancel_join_thread()` for all
873
+ # `_index_queues` because the whole purpose of workers and
874
+ # `pin_memory_thread` is to serve the loader process. If
875
+ # loader process is already exiting, we don't really care if
876
+ # the queues are corrupted.
877
+ #
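+ #        A minimal sketch of what `cancel_join_thread` changes (illustrative
+ #        only, not the DataLoader code itself):
+ #
+ #            import multiprocessing as mp
+ #
+ #            q = mp.Queue()
+ #            # Without this call, process exit would join the queue's
+ #            # background "feeder" thread, which blocks until everything
+ #            # buffered has been flushed into the pipe -- a hang if the
+ #            # reader on the other end is already gone.
+ #            q.cancel_join_thread()
+ #            q.put("item")
+ #            # The process may now exit without waiting on the feeder
+ #            # thread, at the cost of possibly leaving the queue corrupted.
+ #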
878
+ #
879
+ # Now let's get back to 1:
880
+ # how we gracefully exit the workers when the last reference to the
881
+ # iterator is gone.
882
+ #
883
+ # To achieve this, we implement the following logic along with the design
884
+ # choices mentioned above:
885
+ #
886
+ # `workers_done_event`:
887
+ # A `multiprocessing.Event` shared among the main process and all worker
888
+ # processes. This is used to signal the workers that the iterator is
889
+ # shutting down. After it is set, they will not send processed data to
890
+ # queues anymore, and only wait for the final `None` before exiting.
891
+ # `done_event` isn't strictly needed. I.e., we can just check for `None`
892
+ # from the input queue, but it allows us to skip wasting resources
893
+ # processing data if we are already shutting down.
894
+ #
895
+ # `pin_memory_thread_done_event`:
896
+ # A `threading.Event` for a similar purpose to that of
897
+ # `workers_done_event`, but is for the `pin_memory_thread`. The reason
898
+ # that separate events are needed is that `pin_memory_thread` reads from
899
+ # the output queue of the workers. But the workers, upon seeing that
900
+ # `workers_done_event` is set, only want to see the final `None`, and are
901
+ # not required to flush all data in the output queue (e.g., it may call
902
+ # `cancel_join_thread` on that queue if its `IterableDataset` iterator
903
+ # happens to exhaust coincidentally, which is out of the control of the
904
+ # main process). Thus, since we will exit `pin_memory_thread` before the
905
+ # workers (see below), two separate events are used.
906
+ #
907
+ # NOTE: In short, the protocol is that the main process will set these
908
+ # `done_event`s and then send the corresponding processes/threads a `None`,
909
+ # and that they may exit at any time after receiving the `None`.
910
+ #
911
+ # NOTE: Using `None` as the final signal is valid, since normal data will
912
+ # always be a 2-tuple with the 1st element being the index of the data
913
+ # transferred (different from dataset index/key), and the 2nd being
914
+ # either the dataset key or the data sample (depending on which part
915
+ # of the data model the queue is at).
916
+ #
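+ #      A compact sketch of this "event + final `None`" protocol (hypothetical
+ #      names; the pseudo-code below and `_utils/worker.py` carry the details):
+ #
+ #          # main process
+ #          done_event.set()        # receivers stop producing new results
+ #          index_queue.put(None)   # final signal; the receiver may exit at
+ #                                  # any time after seeing it
+ #
+ #          # worker process
+ #          while True:
+ #              r = index_queue.get()
+ #              if r is None:
+ #                  break           # got the final signal
+ #              if done_event.is_set():
+ #                  continue        # drain without processing
+ #              process(r)
+ #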
917
+ # [ worker processes ]
918
+ # While loader process is alive:
919
+ # Get from `index_queue`.
920
+ # If we get anything other than the final `None`,
921
+ # Check `workers_done_event`.
922
+ # If set, continue to next iteration
923
+ # i.e., keep getting until see the `None`, then exit.
924
+ # Otherwise, process data:
925
+ # If is fetching from an `IterableDataset` and the iterator
926
+ # is exhausted, send an `_IterableDatasetStopIteration`
927
+ # object to signal iteration end. The main process, upon
928
+ # receiving such an object, will send `None` to this
929
+ # worker and not use the corresponding `index_queue`
930
+ # anymore.
931
+ # If timed out,
932
+ # Whether or not `workers_done_event` is set (we still need to see the
933
+ # `None`), we must continue to the next iteration.
934
+ # (outside loop)
935
+ # If `workers_done_event` is set, (this can be False with `IterableDataset`)
936
+ # `data_queue.cancel_join_thread()`. (Everything is ending here:
937
+ # main process won't read from it;
938
+ # other workers will also call
939
+ # `cancel_join_thread`.)
940
+ #
941
+ # [ pin_memory_thread ]
942
+ # # No need to check main thread. If this thread is alive, the main loader
943
+ # # thread must be alive, because this thread is set as daemonic.
944
+ # While `pin_memory_thread_done_event` is not set:
945
+ # Get from `worker_result_queue`.
946
+ # If timed out, continue to get in the next iteration.
947
+ # Otherwise, process data.
948
+ # While `pin_memory_thread_done_event` is not set:
949
+ # Put processed data to `data_queue` (a `queue.Queue` with blocking put)
950
+ # If timed out, continue to put in the next iteration.
951
+ # Otherwise, break, i.e., continuing to the outer loop.
952
+ #
953
+ # NOTE: we don't check the status of the main thread because
954
+ # 1. if the process is killed by fatal signal, `pin_memory_thread`
955
+ # ends.
956
+ # 2. in other cases, either the cleaning-up in __del__ or the
957
+ # automatic exit of daemonic thread will take care of it.
958
+ # This won't busy-wait either because `.get(timeout)` does not
959
+ # busy-wait.
960
+ #
961
+ # [ main process ]
962
+ # In the DataLoader Iter's `__del__`
963
+ # b. Exit `pin_memory_thread`
964
+ # i. Set `pin_memory_thread_done_event`.
965
+ # ii. Put `None` in `worker_result_queue`.
966
+ # iii. Join the `pin_memory_thread`.
967
+ # iv. `worker_result_queue.cancel_join_thread()`.
968
+ #
969
+ # c. Exit the workers.
970
+ # i. Set `workers_done_event`.
971
+ # ii. Put `None` in each worker's `index_queue`.
972
+ # iii. Join the workers.
973
+ # iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
974
+ #
975
+ # NOTE: (c) is better placed after (b) because it may leave corrupted
976
+ # data in `worker_result_queue`, which `pin_memory_thread`
977
+ # reads from, in which case the exit of `pin_memory_thread` can only
978
+ # happen at a timeout, which is slow. Nonetheless, the same thing
979
+ # happens if a worker is killed by signal at unfortunate times,
980
+ # but in other cases, we are better off having a non-corrupted
981
+ # `worker_result_queue` for `pin_memory_thread`.
982
+ #
983
+ # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
984
+ # can be omitted
985
+ #
986
+ # NB: The `done_event`s aren't strictly needed. E.g., we can just check for
987
+ # `None` from `index_queue`, but it allows us to skip wasting resources
988
+ # processing indices already in `index_queue` if we are already shutting
989
+ # down.
990
+
991
+ def __init__(self, loader):
992
+ super().__init__(loader)
993
+
994
+ self._prefetch_factor = loader.prefetch_factor
995
+
996
+ assert self._num_workers > 0
997
+ assert self._prefetch_factor > 0
998
+
999
+ if loader.multiprocessing_context is None:
1000
+ multiprocessing_context = multiprocessing
1001
+ else:
1002
+ multiprocessing_context = loader.multiprocessing_context
1003
+
1004
+ self._worker_init_fn = loader.worker_init_fn
1005
+
1006
+ # Adds forward compatibilities so classic DataLoader can work with DataPipes:
1007
+ # Additional worker init function will take care of sharding in MP and Distributed
1008
+ if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
1009
+ self._worker_init_fn = functools.partial(
1010
+ _sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank)
1011
+
1012
+ # No certainty which module multiprocessing_context is
1013
+ self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
1014
+ self._worker_pids_set = False
1015
+ self._shutdown = False
1016
+ self._workers_done_event = multiprocessing_context.Event()
1017
+
1018
+ self._index_queues = []
1019
+ self._workers = []
1020
+ for i in range(self._num_workers):
1021
+ # No certainty which module multiprocessing_context is
1022
+ index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
1023
+ # Need to `cancel_join_thread` here!
1024
+ # See sections (2) and (3b) above.
1025
+ index_queue.cancel_join_thread()
1026
+ w = multiprocessing_context.Process(
1027
+ target=_utils.worker._worker_loop,
1028
+ args=(self._dataset_kind, self._dataset, index_queue,
1029
+ self._worker_result_queue, self._workers_done_event,
1030
+ self._auto_collation, self._collate_fn, self._drop_last,
1031
+ self._base_seed, self._worker_init_fn, i, self._num_workers,
1032
+ self._persistent_workers, self._shared_seed))
1033
+ w.daemon = True
1034
+ # NB: Process.start() actually takes some time as it needs to
1035
+ # start a process and pass the arguments over via a pipe.
1036
+ # Therefore, we only add a worker to the self._workers list after
1037
+ # it has started, so that we do not call .join() if the program dies
1038
+ # before it starts, and __del__ tries to join but will get:
1039
+ # AssertionError: can only join a started process.
1040
+ w.start()
1041
+ self._index_queues.append(index_queue)
1042
+ self._workers.append(w)
1043
+
1044
+ if self._pin_memory:
1045
+ self._pin_memory_thread_done_event = threading.Event()
1046
+
1047
+ # Queue is not type-annotated
1048
+ self._data_queue = queue.Queue() # type: ignore[var-annotated]
1049
+ if self._pin_memory_device == "xpu":
1050
+ current_device = torch.xpu.current_device() # type: ignore[attr-defined]
1051
+ elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
1052
+ custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
1053
+ current_device = custom_device_mod.current_device()
1054
+ else:
1055
+ current_device = torch.cuda.current_device() # choose cuda for default
1056
+ pin_memory_thread = threading.Thread(
1057
+ target=_utils.pin_memory._pin_memory_loop,
1058
+ args=(self._worker_result_queue, self._data_queue,
1059
+ current_device,
1060
+ self._pin_memory_thread_done_event, self._pin_memory_device))
1061
+ pin_memory_thread.daemon = True
1062
+ pin_memory_thread.start()
1063
+ # Similar to workers (see comment above), we only register
1064
+ # pin_memory_thread once it is started.
1065
+ self._pin_memory_thread = pin_memory_thread
1066
+ else:
1067
+ self._data_queue = self._worker_result_queue # type: ignore[assignment]
1068
+
1069
+ # In some rare cases, persistent workers (daemonic processes)
1070
+ # would be terminated before the `__del__` of the iterator is invoked
1071
+ # when the main process exits.
1072
+ # That would cause a failure when pin_memory_thread tries to read
1073
+ # corrupted data from worker_result_queue.
1074
+ # atexit is used to shut down the thread and child processes in the
1075
+ # right sequence before the main process exits.
1076
+ if self._persistent_workers and self._pin_memory:
1077
+ import atexit
1078
+ for w in self._workers:
1079
+ atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)
1080
+
1081
+ # .pid can be None only before process is spawned (not the case, so ignore)
1082
+ _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc]
1083
+ _utils.signal_handling._set_SIGCHLD_handler()
1084
+ self._worker_pids_set = True
1085
+ self._reset(loader, first_iter=True)
1086
+
1087
+ def _reset(self, loader, first_iter=False):
1088
+ super()._reset(loader, first_iter)
1089
+ self._send_idx = 0 # idx of the next task to be sent to workers
1090
+ self._rcvd_idx = 0 # idx of the next task to be returned in __next__
1091
+ # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
1092
+ # map: task idx => - (worker_id,) if data isn't fetched (outstanding)
1093
+ # \ (worker_id, data) if data is already fetched (out-of-order)
1094
+ self._task_info = {}
1095
+ self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1)
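+ # For example (illustrative values): right after `_try_put_index` sends task 3
+ # to worker 1, `self._task_info[3] == (1,)` (outstanding); if its result comes
+ # back while `self._rcvd_idx` is still smaller than 3, `_next_data` buffers it
+ # as `self._task_info[3] == (1, data)` until `self._rcvd_idx` reaches 3.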
1096
+ # A list of booleans representing whether each worker still has work to
1097
+ # do, i.e., not having exhausted its iterable dataset object. It always
1098
+ # contains all `True`s if not using an iterable-style dataset
1099
+ # (i.e., if kind != Iterable).
1100
+ # Note that this indicates that a worker still has work to do *for this epoch*.
1101
+ # It does not mean that a worker is dead. In case of `_persistent_workers`,
1102
+ # the worker will be reset to available in the next epoch.
1103
+ self._workers_status = [True for i in range(self._num_workers)]
1104
+ # Reset the worker queue cycle so it resumes next epoch at worker 0
1105
+ self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
1106
+ # We resume the prefetching in case it was enabled
1107
+ if not first_iter:
1108
+ for idx in range(self._num_workers):
1109
+ self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
1110
+ resume_iteration_cnt = self._num_workers
1111
+ while resume_iteration_cnt > 0:
1112
+ return_idx, return_data = self._get_data()
1113
+ if isinstance(return_idx, _utils.worker._ResumeIteration):
1114
+ assert return_data is None
1115
+ resume_iteration_cnt -= 1
1116
+ # prime the prefetch loop
1117
+ for _ in range(self._prefetch_factor * self._num_workers):
1118
+ self._try_put_index()
1119
+
1120
+ def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
1121
+ # Tries to fetch data from `self._data_queue` once for a given timeout.
1122
+ # This can also be used as inner loop of fetching without timeout, with
1123
+ # the sender status as the loop condition.
1124
+ #
1125
+ # This raises a `RuntimeError` if any worker died unexpectedly. This error
1126
+ # can come from either the SIGCHLD handler in `_utils/signal_handling.py`
1127
+ # (only for non-Windows platforms), or the manual check below on errors
1128
+ # and timeouts.
1129
+ #
1130
+ # Returns a 2-tuple:
1131
+ # (bool: whether data was successfully fetched, any: the data if successful, else None)
1132
+ try:
1133
+ data = self._data_queue.get(timeout=timeout)
1134
+ return (True, data)
1135
+ except Exception as e:
1136
+ # At timeout and error, we manually check whether any worker has
1137
+ # failed. Note that this is the only mechanism for Windows to detect
1138
+ # worker failures.
1139
+ failed_workers = []
1140
+ for worker_id, w in enumerate(self._workers):
1141
+ if self._workers_status[worker_id] and not w.is_alive():
1142
+ failed_workers.append(w)
1143
+ self._mark_worker_as_unavailable(worker_id)
1144
+ if len(failed_workers) > 0:
1145
+ pids_str = ', '.join(str(w.pid) for w in failed_workers)
1146
+ raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e
1147
+ if isinstance(e, queue.Empty):
1148
+ return (False, None)
1149
+ import tempfile
1150
+ import errno
1151
+ try:
1152
+ # Raise an exception if we are this close to the FDs limit.
1153
+ # Apparently, trying to open only one file is not a sufficient
1154
+ # test.
1155
+ # See NOTE [ DataLoader on Linux and open files limit ]
1156
+ fds_limit_margin = 10
1157
+ fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
1158
+ except OSError as e:
1159
+ if e.errno == errno.EMFILE:
1160
+ raise RuntimeError(
1161
+ "Too many open files. Communication with the"
1162
+ " workers is no longer possible. Please increase the"
1163
+ " limit using `ulimit -n` in the shell or change the"
1164
+ " sharing strategy by calling"
1165
+ " `torch.multiprocessing.set_sharing_strategy('file_system')`"
1166
+ " at the beginning of your code") from None
1167
+ raise
1168
+
1169
+ # NOTE [ DataLoader on Linux and open files limit ]
1170
+ #
1171
+ # On Linux when DataLoader is used with multiprocessing we pass the data between
1172
+ # the root process and the workers through SHM files. We remove those files from
1173
+ # the filesystem as soon as they are created and keep them alive by
1174
+ # passing around their file descriptors through AF_UNIX sockets. (See
1175
+ # docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in
1176
+ # the wiki (https://github.com/pytorch/pytorch/wiki).)
1177
+ #
1178
+ # This sometimes leads us to exceeding the open files limit. When that happens,
1179
+ # and the offending file descriptor is coming over a socket, the `socket` Python
1180
+ # package silently strips the file descriptor from the message, setting only the
1181
+ # `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that
1182
+ # it _indicates that some control data were discarded due to lack of space in
1183
+ # the buffer for ancillary data_). This might reflect the C implementation of
1184
+ # AF_UNIX sockets.
1185
+ #
1186
+ # This behaviour can be reproduced with the script and instructions at the
1187
+ # bottom of this note.
1188
+ #
1189
+ # When that happens, the standard Python `multiprocessing` (and not
1190
+ # `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`
1191
+ #
1192
+ # Sometimes, instead of the FD being stripped, you may get an `OSError:
1193
+ # Too many open files`, both in the script below and in DataLoader. However,
1194
+ # this is rare and seems to be nondeterministic.
1195
+ #
1196
+ #
1197
+ # #!/usr/bin/env python3
1198
+ # import sys
1199
+ # import socket
1200
+ # import os
1201
+ # import array
1202
+ # import shutil
1203
+ # import socket
1204
+ #
1205
+ #
1206
+ # if len(sys.argv) != 4:
1207
+ # print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)")
1208
+ # sys.exit(1)
1209
+ #
1210
+ # if __name__ == '__main__':
1211
+ # dirname = sys.argv[1]
1212
+ # sock_path = dirname + "/sock"
1213
+ # iterations = int(sys.argv[2])
1214
+ # def dummy_path(i):
1215
+ # return dirname + "/" + str(i) + ".dummy"
1216
+ #
1217
+ #
1218
+ # if sys.argv[3] == 'send':
1219
+ # while not os.path.exists(sock_path):
1220
+ # pass
1221
+ # client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
1222
+ # client.connect(sock_path)
1223
+ # for i in range(iterations):
1224
+ # fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)
1225
+ # ancdata = array.array('i', [fd])
1226
+ # msg = bytes([i % 256])
1227
+ # print("Sending fd ", fd, " (iteration #", i, ")")
1228
+ # client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])
1229
+ #
1230
+ #
1231
+ # else:
1232
+ # assert sys.argv[3] == 'recv'
1233
+ #
1234
+ # if os.path.exists(dirname):
1235
+ # raise Exception("Directory exists")
1236
+ #
1237
+ # os.mkdir(dirname)
1238
+ #
1239
+ # print("Opening socket...")
1240
+ # server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
1241
+ # server.bind(sock_path)
1242
+ #
1243
+ # print("Listening...")
1244
+ # for i in range(iterations):
1245
+ # a = array.array('i')
1246
+ # msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))
1247
+ # assert(len(ancdata) == 1)
1248
+ # cmsg_level, cmsg_type, cmsg_data = ancdata[0]
1249
+ # a.frombytes(cmsg_data)
1250
+ # print("Received fd ", a[0], " (iteration #", i, ")")
1251
+ #
1252
+ # shutil.rmtree(dirname)
1253
+ #
1254
+ # Steps to reproduce:
1255
+ #
1256
+ # 1. Run two shells and set lower file descriptor limit in the receiving one:
1257
+ # (shell1) ulimit -n 1020
1258
+ # (shell2) ulimit -n 1022
1259
+ #
1260
+ # 2. Run the script above with the `recv` option in the first shell
1261
+ # (shell1) ./test_socket.py sock_tmp 1017 recv
1262
+ #
1263
+ # 3. Run the script with the `send` option in the second shell:
1264
+ # (shell2) ./test_socket.py sock_tmp 1017 send
1265
+
1266
+ def _get_data(self):
1267
+ # Fetches data from `self._data_queue`.
1268
+ #
1269
+ # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
1270
+ # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`
1271
+ # in a loop. This is the only mechanism to detect worker failures for
1272
+ # Windows. For other platforms, a SIGCHLD handler is also used for
1273
+ # worker failure detection.
1274
+ #
1275
+ # If `pin_memory=True`, we also need to check if `pin_memory_thread` has
1276
+ # died at timeouts.
1277
+ if self._timeout > 0:
1278
+ success, data = self._try_get_data(self._timeout)
1279
+ if success:
1280
+ return data
1281
+ else:
1282
+ raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds')
1283
+ elif self._pin_memory:
1284
+ while self._pin_memory_thread.is_alive():
1285
+ success, data = self._try_get_data()
1286
+ if success:
1287
+ return data
1288
+ else:
1289
+ # while condition is false, i.e., pin_memory_thread died.
1290
+ raise RuntimeError('Pin memory thread exited unexpectedly')
1291
+ # In this case, `self._data_queue` is a `queue.Queue`. But we don't
1292
+ # need to call `.task_done()` because we don't use `.join()`.
1293
+ else:
1294
+ while True:
1295
+ success, data = self._try_get_data()
1296
+ if success:
1297
+ return data
1298
+
1299
+ def _next_data(self):
1300
+ while True:
1301
+ # If the worker responsible for `self._rcvd_idx` has already ended
1302
+ # and was unable to fulfill this task (due to exhausting an `IterableDataset`),
1303
+ # we try to advance `self._rcvd_idx` to find the next valid index.
1304
+ #
1305
+ # This part needs to run in the loop because both the `self._get_data()`
1306
+ # call and `_IterableDatasetStopIteration` check below can mark
1307
+ # extra worker(s) as dead.
1308
+ while self._rcvd_idx < self._send_idx:
1309
+ info = self._task_info[self._rcvd_idx]
1310
+ worker_id = info[0]
1311
+ if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active
1312
+ break
1313
+ del self._task_info[self._rcvd_idx]
1314
+ self._rcvd_idx += 1
1315
+ else:
1316
+ # no valid `self._rcvd_idx` is found (i.e., didn't break)
1317
+ if not self._persistent_workers:
1318
+ self._shutdown_workers()
1319
+ raise StopIteration
1320
+
1321
+ # Now `self._rcvd_idx` is the batch index we want to fetch
1322
+
1323
+ # Check if the next sample has already been generated
1324
+ if len(self._task_info[self._rcvd_idx]) == 2:
1325
+ data = self._task_info.pop(self._rcvd_idx)[1]
1326
+ return self._process_data(data)
1327
+
1328
+ assert not self._shutdown and self._tasks_outstanding > 0
1329
+ idx, data = self._get_data()
1330
+ self._tasks_outstanding -= 1
1331
+ if self._dataset_kind == _DatasetKind.Iterable:
1332
+ # Check for _IterableDatasetStopIteration
1333
+ if isinstance(data, _utils.worker._IterableDatasetStopIteration):
1334
+ if self._persistent_workers:
1335
+ self._workers_status[data.worker_id] = False
1336
+ else:
1337
+ self._mark_worker_as_unavailable(data.worker_id)
1338
+ self._try_put_index()
1339
+ continue
1340
+
1341
+ if idx != self._rcvd_idx:
1342
+ # store out-of-order samples
1343
+ self._task_info[idx] += (data,)
1344
+ else:
1345
+ del self._task_info[idx]
1346
+ return self._process_data(data)
1347
+
1348
+ def _try_put_index(self):
1349
+ assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
1350
+
1351
+ try:
1352
+ index = self._next_index()
1353
+ except StopIteration:
1354
+ return
1355
+ for _ in range(self._num_workers): # find the next active worker, if any
1356
+ worker_queue_idx = next(self._worker_queue_idx_cycle)
1357
+ if self._workers_status[worker_queue_idx]:
1358
+ break
1359
+ else:
1360
+ # not found (i.e., didn't break)
1361
+ return
1362
+
1363
+ self._index_queues[worker_queue_idx].put((self._send_idx, index)) # type: ignore[possibly-undefined]
1364
+ self._task_info[self._send_idx] = (worker_queue_idx,)
1365
+ self._tasks_outstanding += 1
1366
+ self._send_idx += 1
1367
+
1368
+ def _process_data(self, data):
1369
+ self._rcvd_idx += 1
1370
+ self._try_put_index()
1371
+ if isinstance(data, ExceptionWrapper):
1372
+ data.reraise()
1373
+ return data
1374
+
1375
+ def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
1376
+ # Mark a worker as having finished its work e.g., due to
1377
+ # exhausting an `IterableDataset`. This should be used only when this
1378
+ # `_MultiProcessingDataLoaderIter` is going to continue running.
1379
+
1380
+ assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)
1381
+
1382
+ # Signal termination to that specific worker.
1383
+ q = self._index_queues[worker_id]
1384
+ # Indicate that no more data will be put on this queue by the current
1385
+ # process.
1386
+ q.put(None)
1387
+
1388
+ # Note that we don't actually join the worker here, nor do we remove the
1389
+ # worker's pid from C side struct because (1) joining may be slow, and
1390
+ # (2) since we don't join, the worker may still raise error, and we
1391
+ # prefer capturing those, rather than ignoring them, even though they
1392
+ # are raised after the worker has finished its job.
1393
+ # Joining is deferred to `_shutdown_workers`, which is called when
1394
+ # all workers finish their jobs (e.g., `IterableDataset` replicas) or
1395
+ # when this iterator is garbage collected.
1396
+
1397
+ self._workers_status[worker_id] = False
1398
+
1399
+ assert self._workers_done_event.is_set() == shutdown
1400
+
1401
+ def _shutdown_workers(self):
1402
+ # Called when shutting down this `_MultiProcessingDataLoaderIter`.
1403
+ # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
1404
+ # the logic of this function.
1405
+ if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None:
1406
+ # See (2) of the note. If Python is shutting down, do no-op.
1407
+ return
1408
+ # Normal exit when last reference is gone / iterator is depleted.
1409
+ # See (1) and the second half of the note.
1410
+ if not self._shutdown:
1411
+ self._shutdown = True
1412
+ try:
1413
+ # Normal exit when last reference is gone / iterator is depleted.
1414
+ # See (1) and the second half of the note.
1415
+
1416
+ # Exit `pin_memory_thread` first because exiting workers may leave
1417
+ # corrupted data in `worker_result_queue` which `pin_memory_thread`
1418
+ # reads from.
1419
+ if hasattr(self, '_pin_memory_thread'):
1420
+ # Use hasattr in case error happens before we set the attribute.
1421
+ self._pin_memory_thread_done_event.set()
1422
+ # Send something to pin_memory_thread in case it is waiting
1423
+ # so that it can wake up and check `pin_memory_thread_done_event`
1424
+ self._worker_result_queue.put((None, None))
1425
+ self._pin_memory_thread.join()
1426
+ self._worker_result_queue.cancel_join_thread()
1427
+ self._worker_result_queue.close()
1428
+
1429
+ # Exit workers now.
1430
+ self._workers_done_event.set()
1431
+ for worker_id in range(len(self._workers)):
1432
+ # Get number of workers from `len(self._workers)` instead of
1433
+ # `self._num_workers` in case we error before starting all
1434
+ # workers.
1435
+ # If we are using workers_status with persistent_workers,
1437
+ # we have to shut the worker down because it is paused.
1437
+ if self._persistent_workers or self._workers_status[worker_id]:
1438
+ self._mark_worker_as_unavailable(worker_id, shutdown=True)
1439
+ for w in self._workers:
1440
+ # We should be able to join here, but in case anything went
1441
+ # wrong, we set a timeout and if the workers fail to join,
1442
+ # they are killed in the `finally` block.
1443
+ w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
1444
+ for q in self._index_queues:
1445
+ q.cancel_join_thread()
1446
+ q.close()
1447
+ finally:
1448
+ # Even though all this function does is putting into queues that
1449
+ # we have called `cancel_join_thread` on, weird things can
1450
+ # happen when a worker is killed by a signal, e.g., hanging in
1451
+ # `Event.set()`. So we need to guard this with SIGCHLD handler,
1452
+ # and remove pids from the C side data structure only at the
1453
+ # end.
1454
+ #
1455
+ # FIXME: Unfortunately, for Windows, we are missing a worker
1456
+ # error detection mechanism here in this function, as it
1457
+ # doesn't provide a SIGCHLD handler.
1458
+ if self._worker_pids_set:
1459
+ _utils.signal_handling._remove_worker_pids(id(self))
1460
+ self._worker_pids_set = False
1461
+ for w in self._workers:
1462
+ if w.is_alive():
1463
+ # Existing mechanisms try to make the workers exit
1464
+ # peacefully, but in case that we unfortunately reach
1465
+ # here, which we shouldn't, (e.g., pytorch/pytorch#39570),
1466
+ # we kill the worker.
1467
+ w.terminate()
1468
+
1469
+ # staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter`
1470
+ @staticmethod
1471
+ def _clean_up_worker(w):
1472
+ try:
1473
+ w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
1474
+ finally:
1475
+ if w.is_alive():
1476
+ w.terminate()
1477
+
1478
+ def __del__(self):
1479
+ self._shutdown_workers()
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from . import iter
2
+ from . import map
3
+ from . import dataframe
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py ADDED
@@ -0,0 +1,184 @@
1
+ import inspect
2
+ from functools import wraps
3
+ from typing import Any, Callable, Optional, Type, Union, get_type_hints
4
+ from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
5
+ from torch.utils.data.datapipes._typing import _DataPipeMeta
6
+
7
+
8
+ ######################################################
9
+ # Functional API
10
+ ######################################################
11
+ class functional_datapipe:
12
+ name: str
13
+
14
+ def __init__(self, name: str, enable_df_api_tracing=False) -> None:
15
+ """
16
+ Define a functional datapipe.
17
+
18
+ Args:
19
+ enable_df_api_tracing - if set, any returned DataPipe would accept
20
+ DataFrames API in tracing mode.
21
+ """
22
+ self.name = name
23
+ self.enable_df_api_tracing = enable_df_api_tracing
24
+
25
+ def __call__(self, cls):
26
+ if issubclass(cls, IterDataPipe):
27
+ if isinstance(cls, Type): # type: ignore[arg-type]
28
+ if not isinstance(cls, _DataPipeMeta):
29
+ raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
30
+ # with non_deterministic decorator
31
+ else:
32
+ if not isinstance(cls, non_deterministic) and \
33
+ not (hasattr(cls, '__self__') and
34
+ isinstance(cls.__self__, non_deterministic)):
35
+ raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
36
+ IterDataPipe.register_datapipe_as_function(self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing)
37
+ elif issubclass(cls, MapDataPipe):
38
+ MapDataPipe.register_datapipe_as_function(self.name, cls)
39
+
40
+ return cls
41
+
42
+
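+ # Example usage (a sketch; `_MapperExampleIterDataPipe` and the name
+ # "map_example" are hypothetical and chosen only for illustration):
+ #
+ #     @functional_datapipe("map_example")
+ #     class _MapperExampleIterDataPipe(IterDataPipe):
+ #         def __init__(self, source_dp, fn):
+ #             self.source_dp = source_dp
+ #             self.fn = fn
+ #
+ #         def __iter__(self):
+ #             for x in self.source_dp:
+ #                 yield self.fn(x)
+ #
+ #     # After registration, the class is reachable as a chained method on any
+ #     # IterDataPipe instance:
+ #     #     mapped_dp = some_datapipe.map_example(lambda x: x + 1)
+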
43
+ ######################################################
44
+ # Determinism
45
+ ######################################################
46
+ _determinism: bool = False
47
+
48
+
49
+ class guaranteed_datapipes_determinism:
50
+ prev: bool
51
+
52
+ def __init__(self) -> None:
53
+ global _determinism
54
+ self.prev = _determinism
55
+ _determinism = True
56
+
57
+ def __enter__(self) -> None:
58
+ pass
59
+
60
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
61
+ global _determinism
62
+ _determinism = self.prev
63
+
64
+
65
+ class non_deterministic:
66
+ cls: Optional[Type[IterDataPipe]] = None
67
+ # TODO: Lambda for picking
68
+ deterministic_fn: Callable[[], bool]
69
+
70
+ def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None:
71
+ # 1. Decorator doesn't have any argument
72
+ if isinstance(arg, Type): # type: ignore[arg-type]
73
+ if not issubclass(arg, IterDataPipe): # type: ignore[arg-type]
74
+ raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`"
75
+ f", but {arg.__name__} is found")
76
+ self.cls = arg # type: ignore[assignment]
77
+ # 2. Decorator has an argument of a function
78
+ # This class should behave differently given different inputs. Use this
79
+ # function to verify the determinism for each instance.
80
+ # When the function returns True, the instance is non-deterministic. Otherwise,
81
+ # the instance is a deterministic DataPipe.
82
+ elif isinstance(arg, Callable): # type:ignore[arg-type]
83
+ self.deterministic_fn = arg # type: ignore[assignment, misc]
84
+ else:
85
+ raise TypeError(f"{arg} can not be decorated by non_deterministic")
86
+
87
+ def __call__(self, *args, **kwargs):
88
+ global _determinism
89
+ # Decorate IterDataPipe
90
+ if self.cls is not None:
91
+ if _determinism:
92
+ raise TypeError("{} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. "
93
+ "You can turn off determinism for this DataPipe if that is acceptable "
94
+ "for your application".format(self.cls.__name__))
95
+ return self.cls(*args, **kwargs) # type: ignore[call-arg]
96
+
97
+ # Decorate with a functional argument
98
+ if not (isinstance(args[0], Type) and # type: ignore[arg-type]
99
+ issubclass(args[0], IterDataPipe)):
100
+ raise TypeError(f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found")
101
+ self.cls = args[0]
102
+ return self.deterministic_wrapper_fn
103
+
104
+ def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe:
105
+ res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc]
106
+ if not isinstance(res, bool):
107
+ raise TypeError("deterministic_fn of `non_deterministic` decorator is required "
108
+ f"to return a boolean value, but {type(res)} is found")
109
+ global _determinism
110
+ if _determinism and res:
111
+ raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr]
112
+ "'guaranteed_datapipes_determinism'. You can turn off determinism "
113
+ "for this DataPipe if that is acceptable for your application"
114
+ )
115
+ return self.cls(*args, **kwargs) # type: ignore[call-arg, misc]
116
+
117
+
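+ # Example usage of the determinism helpers above (a sketch with hypothetical
+ # DataPipe names):
+ #
+ #     @non_deterministic
+ #     class _ShuffleLikeIterDataPipe(IterDataPipe):
+ #         ...
+ #
+ #     def _is_nondeterministic(shuffle: bool) -> bool:
+ #         return shuffle
+ #
+ #     @non_deterministic(_is_nondeterministic)
+ #     class _MaybeShuffledIterDataPipe(IterDataPipe):
+ #         def __init__(self, shuffle: bool):
+ #             ...
+ #
+ #     with guaranteed_datapipes_determinism():
+ #         _ShuffleLikeIterDataPipe()                  # raises TypeError
+ #         _MaybeShuffledIterDataPipe(shuffle=False)   # allowed: predicate is False
+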
118
+ ######################################################
119
+ # Type validation
120
+ ######################################################
121
+ # Validate each argument of DataPipe with hint as a subtype of the hint.
122
+ def argument_validation(f):
123
+ signature = inspect.signature(f)
124
+ hints = get_type_hints(f)
125
+
126
+ @wraps(f)
127
+ def wrapper(*args, **kwargs):
128
+ bound = signature.bind(*args, **kwargs)
129
+ for argument_name, value in bound.arguments.items():
130
+ if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta):
131
+ hint = hints[argument_name]
132
+ if not isinstance(value, IterDataPipe):
133
+ raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}")
134
+ if not value.type.issubtype(hint.type):
135
+ raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of "
136
+ f"hint {hint.type}, but found {value.type}"
137
+ )
138
+
139
+ return f(*args, **kwargs)
140
+
141
+ return wrapper
142
+
143
+
144
+ # Default value is True
145
+ _runtime_validation_enabled: bool = True
146
+
147
+
148
+ class runtime_validation_disabled:
149
+ prev: bool
150
+
151
+ def __init__(self) -> None:
152
+ global _runtime_validation_enabled
153
+ self.prev = _runtime_validation_enabled
154
+ _runtime_validation_enabled = False
155
+
156
+ def __enter__(self) -> None:
157
+ pass
158
+
159
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
160
+ global _runtime_validation_enabled
161
+ _runtime_validation_enabled = self.prev
162
+
163
+
164
+ # Runtime checking
165
+ # Validate output data is subtype of return hint
166
+ def runtime_validation(f):
167
+ # TODO:
168
+ # Can be extended to validate '__getitem__' and nonblocking
169
+ if f.__name__ != '__iter__':
170
+ raise TypeError(f"Can not decorate function {f.__name__} with 'runtime_validation'")
171
+
172
+ @wraps(f)
173
+ def wrapper(self):
174
+ global _runtime_validation_enabled
175
+ if not _runtime_validation_enabled:
176
+ yield from f(self)
177
+ else:
178
+ it = f(self)
179
+ for d in it:
180
+ if not self.type.issubtype_of_instance(d):
181
+ raise RuntimeError(f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})")
182
+ yield d
183
+
184
+ return wrapper
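+
+
+ # Example usage (a sketch; `_IntsExampleIterDataPipe` is a hypothetical pipe
+ # annotated as `IterDataPipe[int]` so that `self.type` is available to the
+ # wrapper above):
+ #
+ #     class _IntsExampleIterDataPipe(IterDataPipe[int]):
+ #         @runtime_validation
+ #         def __iter__(self):
+ #             yield from (1, 2, 3)
+ #
+ #     list(_IntsExampleIterDataPipe())       # ok: every yielded item is an int
+ #
+ #     with runtime_validation_disabled():
+ #         list(_IntsExampleIterDataPipe())   # validation skipped entirely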
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py ADDED
@@ -0,0 +1,248 @@
1
+ import inspect
2
+ import functools
3
+ from enum import Enum
4
+
5
+ import torch.autograd
6
+
7
+
8
+ class _SnapshotState(Enum):
9
+ r"""
10
+ These are the snapshotting-related states that IterDataPipes can be in.
11
+
12
+ `NotStarted` - allows you to restore a snapshot and create an iterator with reset
13
+ `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
14
+ `Iterating` - can restore, will reset if you create a new iterator
15
+ """
16
+
17
+ NotStarted = 0
18
+ Restored = 1
19
+ Iterating = 2
20
+
21
+
22
+ def _simplify_obj_name(obj) -> str:
23
+ """Simplify the display strings of objects for the purpose of rendering within DataPipe error messages."""
24
+ if inspect.isfunction(obj):
25
+ return obj.__name__
26
+ else:
27
+ return repr(obj)
28
+
29
+
30
+ def _strip_datapipe_from_name(name: str) -> str:
31
+ return name.replace("IterDataPipe", "").replace("MapDataPipe", "")
32
+
33
+
34
+ def _generate_input_args_string(obj):
35
+ """Generate a string for the input arguments of an object."""
36
+ signature = inspect.signature(obj.__class__)
37
+ input_param_names = set()
38
+ for param_name in signature.parameters.keys():
39
+ input_param_names.add(param_name)
40
+ result = []
41
+ for name, value in inspect.getmembers(obj):
42
+ if name in input_param_names:
43
+ result.append((name, _simplify_obj_name(value)))
44
+ return ', '.join([f'{name}={value}' for name, value in result])
45
+
46
+
47
+ def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False):
48
+ output_string = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
49
+ if simplify_dp_name:
50
+ output_string = _strip_datapipe_from_name(output_string)
51
+ return output_string
52
+
53
+
54
+ def _gen_invalid_iterdatapipe_msg(datapipe):
55
+ return ("This iterator has been invalidated because another iterator has been created "
56
+ f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
57
+ "This may be caused by multiple references to the same IterDataPipe. We recommend "
58
+ "using `.fork()` if that is necessary.")
59
+
60
+
61
+ _feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
62
+ "to comment on this issue: https://github.com/pytorch/data/issues/45.")
63
+
64
+
65
+ def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
66
+ r"""
67
+ Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception.
68
+
69
+ In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.
70
+ """
71
+ if next_method_exists:
72
+ # This is the case where `IterDataPipe` has both `__iter__` and `__next__`.
73
+ # The `_valid_iterator_id` should either be never set (`None`), or set by at most one
74
+ # iterator (`0`). Otherwise, it means there are multiple iterators.
75
+ if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
76
+ extra_msg = "\nNote that this exception is raised inside your IterDataPipe's `__next__` method"
77
+ raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)
78
+ elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
79
+ if hasattr(datapipe, "_check_valid_iterator_id"):
80
+ if not datapipe._check_valid_iterator_id(iterator_id):
81
+ raise RuntimeError("This iterator has been invalidated, because a new iterator has been created "
82
+ f"from one of the ChildDataPipes of "
83
+ f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." + _feedback_msg)
84
+ else:
85
+ raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.")
86
+ elif datapipe._valid_iterator_id != iterator_id:
87
+ raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)
88
+
89
+
90
+ def _set_datapipe_valid_iterator_id(datapipe):
91
+ """Given a DataPipe, updates its valid iterator ID and reset the DataPipe."""
92
+ if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
93
+ if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
94
+ datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate
95
+ else:
96
+ raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.")
97
+ else:
98
+ if datapipe._valid_iterator_id is None:
99
+ datapipe._valid_iterator_id = 0
100
+ else:
101
+ datapipe._valid_iterator_id += 1
102
+ datapipe.reset()
103
+ return datapipe._valid_iterator_id
104
+
105
+
106
+ def hook_iterator(namespace):
107
+ r"""
108
+ Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`.
109
+
110
+ This is done for the purpose of profiling and checking if an iterator is still valid.
111
+ """
112
+
113
+ def profiler_record_fn_context(datapipe):
114
+ if not hasattr(datapipe, "_profile_name"):
115
+ datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True)
116
+ return torch.autograd.profiler.record_function(datapipe._profile_name)
117
+
118
+ class IteratorDecorator:
119
+ r"""
120
+ Wrap the iterator and modify its `__next__` method.
121
+
122
+ This decorator is applied to DataPipes whose `__iter__` method is NOT a generator function.
123
+ Such `__iter__` methods commonly return `self`, but not necessarily.
124
+ """
125
+
126
+ def __init__(self, iterator, datapipe, iterator_id, has_next_method):
127
+ self.iterator = iterator
128
+ self.datapipe = datapipe
129
+ self.iterator_id = iterator_id
130
+ self._profiler_enabled = torch.autograd._profiler_enabled()
131
+ # Check if `__iter__` returns `self` and `DataPipe` has `__next__`
132
+ self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method
133
+
134
+ def __iter__(self):
135
+ return self
136
+
137
+ def _get_next(self):
138
+ """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded."""
139
+ _check_iterator_valid(self.datapipe, self.iterator_id)
140
+ result = next(self.iterator)
141
+ if not self.self_and_has_next_method:
142
+ self.datapipe._number_of_samples_yielded += 1
143
+ return result
144
+
145
+ def __next__(self):
146
+ # TODO: Add try-except to in-place reduce traceback from the Exception
147
+ # See: https://github.com/pytorch/data/issues/284
148
+ if self._profiler_enabled:
149
+ with profiler_record_fn_context(self.datapipe):
150
+ return self._get_next()
151
+ else: # Decided against using `contextlib.nullcontext` for performance reasons
152
+ return self._get_next()
153
+
154
+ def __getattr__(self, name):
155
+ return getattr(self.iterator, name)
156
+
157
+ func = namespace['__iter__']
158
+
159
+ # ``__iter__`` of IterDataPipe is a generator function
160
+ if inspect.isgeneratorfunction(func):
161
+ @functools.wraps(func)
162
+ def wrap_generator(*args, **kwargs):
163
+ gen = func(*args, **kwargs)
164
+ datapipe = args[0]
165
+ if datapipe._fast_forward_iterator:
166
+ it = datapipe._fast_forward_iterator
167
+ datapipe._fast_forward_iterator = None
168
+ datapipe._snapshot_state = _SnapshotState.Iterating
169
+ while True:
170
+ try:
171
+ yield next(it)
172
+ except StopIteration:
173
+ return
174
+ iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
175
+ _profiler_enabled = torch.autograd._profiler_enabled()
176
+ try:
177
+ if _profiler_enabled:
178
+ with profiler_record_fn_context(datapipe):
179
+ response = gen.send(None)
180
+ else:
181
+ response = gen.send(None)
182
+
183
+ while True:
184
+ datapipe._number_of_samples_yielded += 1
185
+ request = yield response
186
+ # Pass through here every time `__next__` is called
187
+ if _profiler_enabled:
188
+ with profiler_record_fn_context(datapipe):
189
+ _check_iterator_valid(datapipe, iterator_id)
190
+ response = gen.send(request)
191
+ else: # Decided against using `contextlib.nullcontext` for performance reasons
192
+ _check_iterator_valid(datapipe, iterator_id)
193
+ response = gen.send(request)
194
+ except StopIteration as e:
195
+ return
196
+ except Exception as e:
197
+ # TODO: Simplify the traceback message to skip over `response = gen.send(None)`
198
+ # Part of https://github.com/pytorch/data/issues/284
199
+ datapipe = args[0]
200
+ msg = "thrown by __iter__ of"
201
+ single_iterator_msg = "single iterator per IterDataPipe constraint"
202
+ if hasattr(e.args, '__len__'):
203
+ full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
204
+ if len(e.args) == 0 or not isinstance(e.args[0], str): # If an exception message doesn't exist
205
+ e.args = (f'\nThis exception is {full_msg}',)
206
+ elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
207
+ e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:]
208
+ raise
209
+
210
+ namespace['__iter__'] = wrap_generator
211
+ else: # ``__iter__`` of IterDataPipe is NOT a generator function
212
+ # IterDataPipe is an iterator with both ``__iter__`` and ``__next__``
213
+ # And ``__iter__`` may or may not return `self`
214
+ if '__next__' in namespace: # If `__next__` exists, put a wrapper around it
215
+ next_func = namespace['__next__']
216
+
217
+ @functools.wraps(next_func)
218
+ def wrap_next(*args, **kwargs):
219
+ datapipe = args[0]
220
+ if torch.autograd._profiler_enabled():
221
+ with profiler_record_fn_context(datapipe):
222
+ result = next_func(*args, **kwargs)
223
+ else:
224
+ result = next_func(*args, **kwargs)
225
+ datapipe._number_of_samples_yielded += 1
226
+ return result
227
+
228
+ namespace['__next__'] = wrap_next
229
+
230
+ # Note that if `__next__` and `__iter__` do something completely unrelated, it may cause issues, but
231
+ # the user would be violating the iterator protocol. Potential issues:
232
+ # 1. Valid iterator ID may not be updated or checked properly
233
+ # 2. The number of samples yielded will be miscounted
234
+
235
+ # Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators
236
+ @functools.wraps(func)
237
+ def wrap_iter(*args, **kwargs):
238
+ iter_ret = func(*args, **kwargs)
239
+ datapipe = args[0]
240
+ datapipe._snapshot_state = _SnapshotState.Iterating
241
+ if datapipe._fast_forward_iterator:
242
+ iter_ret = datapipe._fast_forward_iterator
243
+ datapipe._fast_forward_iterator = None
244
+ return iter_ret
245
+ iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
246
+ return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace)
247
+
248
+ namespace['__iter__'] = wrap_iter
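+
+ # User-visible effect of the hook above (a sketch; `_RangeExampleIterDataPipe`
+ # is a hypothetical IterDataPipe):
+ #
+ #     dp = _RangeExampleIterDataPipe(5)
+ #     it1 = iter(dp)
+ #     next(it1)         # fine
+ #     it2 = iter(dp)    # invalidates `it1` and resets the DataPipe
+ #     next(it1)         # RuntimeError: "This iterator has been invalidated ..."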
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py ADDED
@@ -0,0 +1,430 @@
1
+ # Taking reference from official Python typing
2
+ # https://github.com/python/cpython/blob/master/Lib/typing.py
3
+
4
+ import collections
5
+ import functools
6
+ import numbers
7
+ import sys
8
+
9
+ from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState
10
+ from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union,
11
+ get_type_hints)
12
+ from typing import _eval_type, _tp_cache, _type_check, _type_repr # type: ignore[attr-defined]
13
+ from typing import ForwardRef
14
+
15
+ # TODO: Use TypeAlias when Python 3.6 is deprecated
16
+ # Please check [Note: TypeMeta and TypeAlias]
17
+ # In case of metaclass conflict due to ABCMeta or _ProtocolMeta
18
+ # For Python 3.9, only Protocol in typing uses metaclass
19
+ from abc import ABCMeta
20
+ from typing import _GenericAlias # type: ignore[attr-defined, no-redef]
21
+
22
+ class GenericMeta(ABCMeta): # type: ignore[no-redef]
23
+ pass
24
+
25
+
26
+ class Integer(numbers.Integral):
27
+ pass
28
+
29
+
30
+ class Boolean(numbers.Integral):
31
+ pass
32
+
33
+
34
+ # Python 'type' object is not subscriptable
35
+ # Tuple[int, List, dict] -> valid
36
+ # tuple[int, list, dict] -> invalid
37
+ # Map Python 'type' to abstract base class
38
+ TYPE2ABC = {
39
+ bool: Boolean,
40
+ int: Integer,
41
+ float: numbers.Real,
42
+ complex: numbers.Complex,
43
+ dict: Dict,
44
+ list: List,
45
+ set: Set,
46
+ tuple: Tuple,
47
+ None: type(None),
48
+ }
49
+
50
+
51
+ def issubtype(left, right, recursive=True):
52
+ r"""
53
+ Check if the left-side type is a subtype of the right-side type.
54
+
55
+ If any of type is a composite type like `Union` and `TypeVar` with
56
+ bounds, it would be expanded into a list of types and check all
57
+ of left-side types are subtypes of either one from right-side types.
58
+ """
59
+ left = TYPE2ABC.get(left, left)
60
+ right = TYPE2ABC.get(right, right)
61
+
62
+ if right is Any or left == right:
63
+ return True
64
+
65
+ if isinstance(right, _GenericAlias):
66
+ if getattr(right, '__origin__', None) is Generic:
67
+ return True
68
+
69
+ if right == type(None):
70
+ return False
71
+
72
+ # Right-side type
73
+ constraints = _decompose_type(right)
74
+
75
+ if len(constraints) == 0 or Any in constraints:
76
+ return True
77
+
78
+ if left is Any:
79
+ return False
80
+
81
+ # Left-side type
82
+ variants = _decompose_type(left)
83
+
84
+ # all() will return True for empty variants
85
+ if len(variants) == 0:
86
+ return False
87
+
88
+ return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants)
89
+
90
+
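+ # A few illustrative checks of the expected behaviour (a sketch, not part of
+ # the module itself):
+ #
+ #     issubtype(List[int], List)          # True: the bare hint has open args
+ #     issubtype(int, Union[int, str])     # True: int is one of the variants
+ #     issubtype(List[int], List[str])     # False: int is not a subtype of str
+ #     issubtype(Tuple[int], Any)          # True: everything is a subtype of Any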
91
+ def _decompose_type(t, to_list=True):
92
+ if isinstance(t, TypeVar):
93
+ if t.__bound__ is not None:
94
+ ts = [t.__bound__]
95
+ else:
96
+ # For T_co, __constraints__ is ()
97
+ ts = list(t.__constraints__)
98
+ elif hasattr(t, '__origin__') and t.__origin__ == Union:
99
+ ts = t.__args__
100
+ else:
101
+ if not to_list:
102
+ return None
103
+ ts = [t]
104
+ # Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
105
+ ts = [TYPE2ABC.get(_t, _t) for _t in ts] # type: ignore[misc]
106
+ return ts
107
+
108
+
109
+ def _issubtype_with_constraints(variant, constraints, recursive=True):
110
+ r"""
111
+ Check if the variant is a subtype of either one from constraints.
112
+
113
+ For composite types like `Union` and `TypeVar` with bounds, they
114
+ would be expanded for testing.
115
+ """
116
+ if variant in constraints:
117
+ return True
118
+
119
+ # [Note: Subtype for Union and TypeVar]
120
+ # Python typing is able to flatten Union[Union[...]] or Union[TypeVar].
121
+ # But it couldn't flatten the following scenarios:
122
+ # - Union[int, TypeVar[Union[...]]]
123
+ # - TypeVar[TypeVar[...]]
124
+ # So, variant and each constraint may be a TypeVar or a Union.
125
+ # In these cases, all of the inner types from the variant are required to be
126
+ # extracted and verified as a subtype of any constraint. And, all of
127
+ # inner types from any constraint being a TypeVar or a Union are
128
+ # also required to be extracted and verified if the variant belongs to
129
+ # any of them.
130
+
131
+ # Variant
132
+ vs = _decompose_type(variant, to_list=False)
133
+
134
+ # Variant is TypeVar or Union
135
+ if vs is not None:
136
+ return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs)
137
+
138
+ # Variant is not TypeVar or Union
139
+ if hasattr(variant, '__origin__') and variant.__origin__ is not None:
140
+ v_origin = variant.__origin__
141
+ # In Python-3.9 typing library untyped generics do not have args
142
+ v_args = getattr(variant, "__args__", None)
143
+ else:
144
+ v_origin = variant
145
+ v_args = None
146
+
147
+ # Constraints
148
+ for constraint in constraints:
149
+ cs = _decompose_type(constraint, to_list=False)
150
+
151
+ # Constraint is TypeVar or Union
152
+ if cs is not None:
153
+ if _issubtype_with_constraints(variant, cs, recursive):
154
+ return True
155
+ # Constraint is not TypeVar or Union
156
+ else:
157
+ # __origin__ can be None for plain list, tuple, ... in Python 3.6
158
+ if hasattr(constraint, '__origin__') and constraint.__origin__ is not None:
159
+ c_origin = constraint.__origin__
160
+ if v_origin == c_origin:
161
+ if not recursive:
162
+ return True
163
+ # In Python-3.9 typing library untyped generics do not have args
164
+ c_args = getattr(constraint, "__args__", None)
165
+ if c_args is None or len(c_args) == 0:
166
+ return True
167
+ if v_args is not None and len(v_args) == len(c_args) and \
168
+ all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)):
169
+ return True
170
+ # Tuple[int] -> Tuple
171
+ else:
172
+ if v_origin == constraint:
173
+ return True
174
+
175
+ return False
176
+
177
+
178
+ def issubinstance(data, data_type):
179
+ if not issubtype(type(data), data_type, recursive=False):
180
+ return False
181
+
182
+ # In Python-3.9 typing library __args__ attribute is not defined for untyped generics
183
+ dt_args = getattr(data_type, "__args__", None)
184
+ if isinstance(data, tuple):
185
+ if dt_args is None or len(dt_args) == 0:
186
+ return True
187
+ if len(dt_args) != len(data):
188
+ return False
189
+ return all(issubinstance(d, t) for d, t in zip(data, dt_args))
190
+ elif isinstance(data, (list, set)):
191
+ if dt_args is None or len(dt_args) == 0:
192
+ return True
193
+ t = dt_args[0]
194
+ return all(issubinstance(d, t) for d in data)
195
+ elif isinstance(data, dict):
196
+ if dt_args is None or len(dt_args) == 0:
197
+ return True
198
+ kt, vt = dt_args
199
+ return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items())
200
+
201
+ return True
202
+
203
+
204
+ # [Note: TypeMeta and TypeAlias]
205
+ # In order to keep compatibility for Python 3.6, use Meta for the typing.
206
+ # TODO: When PyTorch drops the support for Python 3.6, it can be converted
207
+ # into the Alias system and using `__class_getitem__` for DataPipe. The
208
+ # typing system will gain benefit of performance and resolving metaclass
209
+ # conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/
210
+
211
+
212
+ class _DataPipeType:
213
+ r"""Save type annotation in `param`."""
214
+
215
+ def __init__(self, param):
216
+ self.param = param
217
+
218
+ def __repr__(self):
219
+ return _type_repr(self.param)
220
+
221
+ def __eq__(self, other):
222
+ if isinstance(other, _DataPipeType):
223
+ return self.param == other.param
224
+ return NotImplemented
225
+
226
+ def __hash__(self):
227
+ return hash(self.param)
228
+
229
+ def issubtype(self, other):
230
+ if isinstance(other.param, _GenericAlias):
231
+ if getattr(other.param, '__origin__', None) is Generic:
232
+ return True
233
+ if isinstance(other, _DataPipeType):
234
+ return issubtype(self.param, other.param)
235
+ if isinstance(other, type):
236
+ return issubtype(self.param, other)
237
+ raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}")
238
+
239
+ def issubtype_of_instance(self, other):
240
+ return issubinstance(other, self.param)
241
+
242
+
243
+ # Default type for DataPipe without annotation
244
+ T_co = TypeVar('T_co', covariant=True)
245
+ _DEFAULT_TYPE = _DataPipeType(Generic[T_co])
246
+
247
+
248
+ class _DataPipeMeta(GenericMeta):
249
+ r"""
250
+ Metaclass for `DataPipe`.
251
+
252
+ Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`.
253
+
254
+ Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
255
+ """
256
+
257
+ type: _DataPipeType
258
+
259
+ def __new__(cls, name, bases, namespace, **kwargs):
260
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
261
+
262
+ # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
263
+ cls.__origin__ = None
264
+ if 'type' in namespace:
265
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
266
+
267
+ namespace['__type_class__'] = False
268
+ # For plain derived class without annotation
269
+ for base in bases:
270
+ if isinstance(base, _DataPipeMeta):
271
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
272
+
273
+ namespace.update({'type': _DEFAULT_TYPE,
274
+ '__init_subclass__': _dp_init_subclass})
275
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
276
+
277
+ def __init__(self, name, bases, namespace, **kwargs):
278
+ super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload]
279
+
280
+ # TODO: Fix isinstance bug
281
+ @_tp_cache
282
+ def _getitem_(self, params):
283
+ if params is None:
284
+ raise TypeError(f'{self.__name__}[t]: t can not be None')
285
+ if isinstance(params, str):
286
+ params = ForwardRef(params)
287
+ if not isinstance(params, tuple):
288
+ params = (params, )
289
+
290
+ msg = f"{self.__name__}[t]: t must be a type"
291
+ params = tuple(_type_check(p, msg) for p in params)
292
+
293
+ if isinstance(self.type.param, _GenericAlias):
294
+ orig = getattr(self.type.param, '__origin__', None)
295
+ if isinstance(orig, type) and orig is not Generic:
296
+ p = self.type.param[params] # type: ignore[index]
297
+ t = _DataPipeType(p)
298
+ l = len(str(self.type)) + 2
299
+ name = self.__name__[:-l]
300
+ name = name + '[' + str(t) + ']'
301
+ bases = (self,) + self.__bases__
302
+ return self.__class__(name, bases,
303
+ {'__init_subclass__': _dp_init_subclass,
304
+ 'type': t,
305
+ '__type_class__': True})
306
+
307
+ if len(params) > 1:
308
+ raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1')
309
+
310
+ t = _DataPipeType(params[0])
311
+
312
+ if not t.issubtype(self.type):
313
+ raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]')
314
+
315
+ # Types are equal, fast path for inheritance
316
+ if self.type == t:
317
+ return self
318
+
319
+ name = self.__name__ + '[' + str(t) + ']'
320
+ bases = (self,) + self.__bases__
321
+
322
+ return self.__class__(name, bases,
323
+ {'__init_subclass__': _dp_init_subclass,
324
+ '__type_class__': True,
325
+ 'type': t})
326
+
327
+ # TODO: Fix isinstance bug
328
+ def _eq_(self, other):
329
+ if not isinstance(other, _DataPipeMeta):
330
+ return NotImplemented
331
+ if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type]
332
+ return self is other
333
+ return (self.__origin__ == other.__origin__ # type: ignore[has-type]
334
+ and self.type == other.type)
335
+
336
+ # TODO: Fix isinstance bug
337
+ def _hash_(self):
338
+ return hash((self.__name__, self.type))
339
+
340
+
341
+ class _IterDataPipeMeta(_DataPipeMeta):
342
+ r"""
343
+ Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`.
344
+
345
+ Add various functions for behaviors specific to `IterDataPipe`.
346
+ """
347
+
348
+ def __new__(cls, name, bases, namespace, **kwargs):
349
+
350
+ if 'reset' in namespace:
351
+ reset_func = namespace['reset']
352
+
353
+ @functools.wraps(reset_func)
354
+ def conditional_reset(*args, **kwargs):
355
+ r"""
356
+ Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`.
357
+
358
+ This allows a recently restored DataPipe to preserve its restored state during the initial `__iter__` call.
359
+ """
360
+ datapipe = args[0]
361
+ if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted):
362
+ # Resetting when `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have
363
+ # already begun iterating.
364
+ datapipe._number_of_samples_yielded = 0
365
+ datapipe._fast_forward_iterator = None
366
+ reset_func(*args, **kwargs)
367
+ datapipe._snapshot_state = _SnapshotState.Iterating
368
+
369
+ namespace['reset'] = conditional_reset
370
+
371
+ if '__iter__' in namespace:
372
+ hook_iterator(namespace)
373
+ return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
374
+
375
+
376
+ def _dp_init_subclass(sub_cls, *args, **kwargs):
377
+ # Add function for datapipe instance to reinforce the type
378
+ sub_cls.reinforce_type = reinforce_type
379
+
380
+ # TODO:
381
+ # - add global switch for type checking at compile-time
382
+
383
+ # Ignore internal type class
384
+ if getattr(sub_cls, '__type_class__', False):
385
+ return
386
+
387
+ # Check if the string type is valid
388
+ if isinstance(sub_cls.type.param, ForwardRef):
389
+ base_globals = sys.modules[sub_cls.__module__].__dict__
390
+ try:
391
+ param = _eval_type(sub_cls.type.param, base_globals, locals())
392
+ sub_cls.type.param = param
393
+ except TypeError as e:
394
+ raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e
395
+
396
+ if '__iter__' in sub_cls.__dict__:
397
+ iter_fn = sub_cls.__dict__['__iter__']
398
+ hints = get_type_hints(iter_fn)
399
+ if 'return' in hints:
400
+ return_hint = hints['return']
401
+ # Plain Return Hint for Python 3.6
402
+ if return_hint == Iterator:
403
+ return
404
+ if not (hasattr(return_hint, '__origin__') and
405
+ (return_hint.__origin__ == Iterator or
406
+ return_hint.__origin__ == collections.abc.Iterator)):
407
+ raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}"
408
+ ", but found {}".format(sub_cls.__name__, _type_repr(hints['return'])))
409
+ data_type = return_hint.__args__[0]
410
+ if not issubtype(data_type, sub_cls.type.param):
411
+ raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}"
412
+ " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__))
413
+
414
+
415
+ def reinforce_type(self, expected_type):
416
+ r"""
417
+ Reinforce the type for the DataPipe instance.
418
+
419
+ The 'expected_type' is required to be a subtype of the original type
420
+ hint, to restrict the type requirement of the DataPipe instance.
421
+ """
422
+ if isinstance(expected_type, tuple):
423
+ expected_type = Tuple[expected_type]
424
+ _type_check(expected_type, msg="'expected_type' must be a type")
425
+
426
+ if not issubtype(expected_type, self.type.param):
427
+ raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}")
428
+
429
+ self.type = _DataPipeType(expected_type)
430
+ return self
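An illustrative sketch (not part of the uploaded file) of how the `issubtype` and `issubinstance` helpers above behave; the import path is assumed to match the module being added here.

from typing import Dict, List, Union
from torch.utils.data.datapipes._typing import issubinstance, issubtype

# `Union` and bare generics are decomposed before comparison
assert issubtype(int, Union[int, str])
assert issubtype(List[int], List)
# With recursive=True (the default), type arguments are checked as well
assert not issubtype(List[str], List[int])
# `issubinstance` additionally validates container contents at runtime
assert issubinstance([1, 2, 3], List[int])
assert not issubinstance({"a": 1}, Dict[str, str])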
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py ADDED
@@ -0,0 +1,11 @@
1
+ from torch.utils.data.datapipes.dataframe.dataframes import (
2
+ CaptureDataFrame, DFIterDataPipe,
3
+ )
4
+ from torch.utils.data.datapipes.dataframe.datapipes import (
5
+ DataFramesAsTuplesPipe,
6
+ )
7
+
8
+ __all__ = ['CaptureDataFrame', 'DFIterDataPipe', 'DataFramesAsTuplesPipe']
9
+
10
+ # Please keep this list sorted
11
+ assert __all__ == sorted(__all__)
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (477 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc ADDED
Binary file (4.56 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py ADDED
@@ -0,0 +1,433 @@
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from torch.utils.data.datapipes._decorator import functional_datapipe
4
+ from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
5
+
6
+ from torch.utils.data.datapipes.dataframe.structures import DataChunkDF
7
+
8
+ # TODO(VitalyFedyunin): Add error when two different traces get combined
9
+
10
+ __all__ = [
11
+ "Capture",
12
+ "CaptureA",
13
+ "CaptureAdd",
14
+ "CaptureCall",
15
+ "CaptureControl",
16
+ "CaptureDataFrame",
17
+ "CaptureDataFrameWithDataPipeOps",
18
+ "CaptureF",
19
+ "CaptureGetAttr",
20
+ "CaptureGetItem",
21
+ "CaptureInitial",
22
+ "CaptureLikeMock",
23
+ "CaptureMul",
24
+ "CaptureSetItem",
25
+ "CaptureSub",
26
+ "CaptureVariable",
27
+ "CaptureVariableAssign",
28
+ "DataFrameTracer",
29
+ "DataFrameTracedOps",
30
+ "disable_capture",
31
+ "get_val",
32
+ ]
33
+
34
+
35
+ def disable_capture():
36
+ CaptureControl.disabled = True
37
+
38
+
39
+ class CaptureControl:
40
+ disabled = False
41
+
42
+
43
+ class DataFrameTracedOps(DFIterDataPipe):
44
+ def __init__(self, source_datapipe, output_var):
45
+ self.source_datapipe = source_datapipe
46
+ self.output_var = output_var
47
+
48
+ def __iter__(self):
49
+ for item in self.source_datapipe:
50
+ yield self.output_var.apply_ops(item)
51
+
52
+
53
+ # TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registered functions
54
+ DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe',
55
+ 'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle']
56
+
57
+ UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding']
58
+
59
+
60
+ class Capture:
61
+ # TODO: All operations are shared across the entire InitialCapture; need to figure out what happens if we join two captures
62
+
63
+ def __init__(self, schema_df=None):
64
+ self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df}
65
+
66
+ def __str__(self):
67
+ return self._ops_str()
68
+
69
+ def _ops_str(self):
70
+ res = ""
71
+ for op in self.ctx['operations']:
72
+ if len(res) > 0:
73
+ res += "\n"
74
+ res += str(op)
75
+ return res
76
+
77
+ def __getstate__(self):
78
+ # TODO(VitalyFedyunin): Currently can't pickle (why?)
79
+ self.ctx['schema_df'] = None
80
+ for var in self.ctx['variables']:
81
+ var.calculated_value = None
82
+ state = {}
83
+ for item in self.__dict__:
84
+ state[item] = getattr(self, item)
85
+ return state
86
+
87
+ def __setstate__(self, state):
88
+ for k, v in state.items():
89
+ setattr(self, k, v)
90
+
91
+ def __getattr__(self, attrname):
92
+ if attrname == 'kwarg' or attrname == 'kwargs':
93
+ raise Exception('no kwargs!')
94
+ if attrname in ['__deepcopy__']:
95
+ raise AttributeError()
96
+ result = CaptureGetAttr(self, attrname, ctx=self.ctx)
97
+ return result
98
+
99
+ def __getitem__(self, key):
100
+ return CaptureGetItem(self, key, ctx=self.ctx)
101
+
102
+ def __setitem__(self, key, value):
103
+ self.ctx['operations'].append(
104
+ CaptureSetItem(self, key, value, ctx=self.ctx))
105
+
106
+ def __add__(self, add_val):
107
+ res = CaptureAdd(self, add_val, ctx=self.ctx)
108
+ var = CaptureVariable(res, ctx=self.ctx)
109
+ self.ctx['operations'].append(
110
+ CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
111
+ return var
112
+
113
+ def __sub__(self, add_val):
114
+ res = CaptureSub(self, add_val, ctx=self.ctx)
115
+ var = CaptureVariable(res, ctx=self.ctx)
116
+ self.ctx['operations'].append(
117
+ CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
118
+ return var
119
+
120
+ def __mul__(self, add_val):
121
+ res = CaptureMul(self, add_val, ctx=self.ctx)
122
+ var = CaptureVariable(res, ctx=self.ctx)
123
+ t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
124
+ self.ctx['operations'].append(t)
125
+ return var
126
+
127
+ def _is_context_empty(self):
128
+ return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0
129
+
130
+ def apply_ops_2(self, dataframe):
131
+ # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
132
+ self.ctx['variables'][0].calculated_value = dataframe
133
+ for op in self.ctx['operations']:
134
+ op.execute()
135
+
136
+ @property
137
+ def columns(self):
138
+ self.apply_ops_2(self.ctx['schema_df'])
139
+ value = self.execute()
140
+ return value.columns
141
+
142
+ # TODO(VitalyFedyunin): Add tests
143
+ # TODO(VitalyFedyunin): Need to join contexts if one of them is empty because we used capture
144
+
145
+ def __call__(self, *args, **kwargs):
146
+ # TODO: Check if args or kwargs have more than one different context
147
+ if self._is_context_empty():
148
+ # TODO: Allow CaptureA to take context from mock
149
+ for arg in args:
150
+ if isinstance(arg, Capture) and not arg._is_context_empty():
151
+ self.ctx = arg.ctx
152
+ break
153
+ if self._is_context_empty():
154
+ for k, v in kwargs.items():
155
+ if isinstance(k, Capture) and not k._is_context_empty():
156
+ self.ctx = k.ctx
157
+ break
158
+ if isinstance(v, Capture) and not v._is_context_empty():
159
+ self.ctx = v.ctx
160
+ break
161
+
162
+ res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
163
+ var = CaptureVariable(None, ctx=self.ctx)
164
+ t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
165
+ self.ctx['operations'].append(t)
166
+ return var
167
+
168
+
169
+ class CaptureF(Capture):
170
+ def __init__(self, ctx=None, **kwargs):
171
+ if ctx is None:
172
+ self.ctx = {'operations': [], 'variables': []}
173
+ else:
174
+ self.ctx = ctx
175
+ self.kwargs = kwargs
176
+
177
+
178
+ class CaptureA(CaptureF):
179
+ def __str__(self):
180
+ return f"{self.kwargs['name']}"
181
+
182
+ def execute(self):
183
+ value = self.kwargs['real_attribute']
184
+ return value
185
+
186
+
187
+ class CaptureLikeMock:
188
+ def __init__(self, name):
189
+ import unittest.mock as mock
190
+ # TODO(VitalyFedyunin): Do not use private function here, copy our own implementation instead.
191
+ get_target, attribute = mock._get_target(name) # type: ignore[attr-defined]
192
+ self.get_target = get_target
193
+ self.attribute = attribute
194
+ self.name = name
195
+
196
+ def __enter__(self):
197
+ self.save = getattr(self.get_target(), self.attribute)
198
+ capt = CaptureA(name=self.name, real_attribute=self.save)
199
+ setattr(self.get_target(), self.attribute, capt)
200
+
201
+ def __exit__(self, *exc_info):
202
+ setattr(self.get_target(), self.attribute, self.save)
203
+
204
+
205
+ class CaptureCall(Capture):
206
+
207
+ def __init__(self, callable, ctx=None, **kwargs):
208
+ if ctx is None:
209
+ self.ctx = {'operations': [], 'variables': []}
210
+ else:
211
+ self.ctx = ctx
212
+ self.kwargs = kwargs
213
+ self.callable = callable
214
+
215
+ def __str__(self):
216
+ return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs)
217
+
218
+ def execute(self):
219
+
220
+ # TODO: VitalyFedyunin execute kwargs and maybe nested structures
221
+ executed_args = []
222
+ for arg in self.kwargs['args']:
223
+ if isinstance(arg, Capture):
224
+ executed_args.append(arg.execute())
225
+ else:
226
+ executed_args.append(arg)
227
+ left = get_val(self.callable)
228
+ return left(*executed_args, **self.kwargs['kwargs'])
229
+
230
+
231
+ class CaptureVariableAssign(CaptureF):
232
+ def __str__(self):
233
+ variable = self.kwargs['variable']
234
+ value = self.kwargs['value']
235
+ return f"{variable} = {value}"
236
+
237
+ def execute(self):
238
+ self.kwargs['variable'].calculated_value = self.kwargs['value'].execute()
239
+
240
+
241
+ class CaptureVariable(Capture):
242
+ # TODO(VitalyFedyunin): This should be atomic and thread safe
243
+ names_idx = 0
244
+
245
+ def __init__(self, value, ctx):
246
+ if CaptureControl.disabled:
247
+ raise Exception('Attempting to create capture variable with capture off')
248
+ self.ctx = ctx
249
+ self.value = value
250
+ self.name = f'var_{CaptureVariable.names_idx}'
251
+ CaptureVariable.names_idx += 1
252
+ self.ctx['variables'].append(self)
253
+
254
+ def __str__(self):
255
+ return self.name
256
+
257
+ def execute(self):
258
+ return self.calculated_value
259
+
260
+ def apply_ops(self, dataframe):
261
+ # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
262
+ self.ctx['variables'][0].calculated_value = dataframe
263
+ for op in self.ctx['operations']:
264
+ op.execute()
265
+ return self.calculated_value
266
+
267
+
268
+ class CaptureGetItem(Capture):
269
+ def __init__(self, left, key, ctx):
270
+ self.ctx = ctx
271
+ self.left = left
272
+ self.key = key
273
+
274
+ def __str__(self):
275
+ return f"{self.left}[{get_val(self.key)}]"
276
+
277
+ def execute(self):
278
+ left = self.left.execute()
279
+ return left[self.key]
280
+
281
+
282
+ class CaptureSetItem(Capture):
283
+ def __init__(self, left, key, value, ctx):
284
+ self.ctx = ctx
285
+ self.left = left
286
+ self.key = key
287
+ self.value = value
288
+
289
+ def __str__(self):
290
+ return f"{self.left}[{get_val(self.key)}] = {self.value}"
291
+
292
+ def execute(self):
293
+ left = self.left.execute()
294
+ value = self.value.execute()
295
+ left[self.key] = value
296
+
297
+
298
+ class CaptureAdd(Capture):
299
+ def __init__(self, left, right, ctx):
300
+ self.ctx = ctx
301
+ self.left = left
302
+ self.right = right
303
+
304
+ def __str__(self):
305
+ return f"{self.left} + {self.right}"
306
+
307
+ def execute(self):
308
+ return get_val(self.left) + get_val(self.right)
309
+
310
+
311
+ class CaptureMul(Capture):
312
+ def __init__(self, left, right, ctx):
313
+ self.ctx = ctx
314
+ self.left = left
315
+ self.right = right
316
+
317
+ def __str__(self):
318
+ return f"{self.left} * {self.right}"
319
+
320
+ def execute(self):
321
+ return get_val(self.left) * get_val(self.right)
322
+
323
+
324
+ class CaptureSub(Capture):
325
+ def __init__(self, left, right, ctx):
326
+ self.ctx = ctx
327
+ self.left = left
328
+ self.right = right
329
+
330
+ def __str__(self):
331
+ return f"{self.left} - {self.right}"
332
+
333
+ def execute(self):
334
+ return get_val(self.left) - get_val(self.right)
335
+
336
+
337
+ class CaptureGetAttr(Capture):
338
+ def __init__(self, src, name, ctx):
339
+ self.ctx = ctx
340
+ self.src = src
341
+ self.name = name
342
+
343
+ def __str__(self):
344
+ return f"{self.src}.{self.name}"
345
+
346
+ def execute(self):
347
+ val = get_val(self.src)
348
+ return getattr(val, self.name)
349
+
350
+
351
+ def get_val(capture):
352
+ if isinstance(capture, Capture):
353
+ return capture.execute()
354
+ elif isinstance(capture, str):
355
+ return f'"{capture}"'
356
+ else:
357
+ return capture
358
+
359
+
360
+ class CaptureInitial(CaptureVariable):
361
+ def __init__(self, schema_df=None):
362
+ new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df}
363
+ super().__init__(None, new_ctx)
364
+ self.name = f'input_{self.name}'
365
+
366
+
367
+ class CaptureDataFrame(CaptureInitial):
368
+ pass
369
+
370
+
371
+ class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):
372
+ def as_datapipe(self):
373
+ return DataFrameTracedOps(
374
+ self.ctx['variables'][0].source_datapipe, self)
375
+
376
+ def raw_iterator(self):
377
+ return self.as_datapipe().__iter__()
378
+
379
+ def __iter__(self):
380
+ return iter(self._dataframes_as_tuples())
381
+
382
+ def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF):
383
+ dp = self._dataframes_per_row()._dataframes_concat(batch_size)
384
+ dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class)
385
+ dp._dp_contains_dataframe = True
386
+ return dp
387
+
388
+ def groupby(self,
389
+ group_key_fn,
390
+ *,
391
+ buffer_size=10000,
392
+ group_size=None,
393
+ guaranteed_group_size=None,
394
+ drop_remaining=False):
395
+ dp = self._dataframes_per_row()
396
+ dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size,
397
+ guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining)
398
+ return dp
399
+
400
+ def shuffle(self, *args, **kwargs):
401
+ return self._dataframes_shuffle(*args, **kwargs)
402
+
403
+ def filter(self, *args, **kwargs):
404
+ return self._dataframes_filter(*args, **kwargs)
405
+
406
+ def collate(self, *args, **kwargs):
407
+ raise Exception("Can't collate unbatched DataFrames stream")
408
+
409
+ def __getattr__(self, attrname): # ?
410
+ if attrname in UNIMPLEMENTED_ATTR:
411
+ raise AttributeError('Attempting to get ', attrname)
412
+ if attrname in DATAPIPES_OPS:
413
+ return (self.as_datapipe()).__getattr__(attrname)
414
+ return super().__getattr__(attrname)
415
+
416
+
417
+ @functional_datapipe('trace_as_dataframe')
418
+ class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe): # type: ignore[misc]
419
+ source_datapipe: Optional[Any] = None
420
+
421
+ # TODO(VitalyFedyunin): Must implement all special functions of datapipes
422
+
423
+ def set_shuffle_settings(self, *args, **kwargs):
424
+ pass
425
+
426
+ def is_shardable(self):
427
+ return False
428
+
429
+ def __init__(self, source_datapipe, schema_df=None):
430
+ self.source_datapipe = source_datapipe
431
+ if schema_df is None:
432
+ schema_df = next(iter(self.source_datapipe))
433
+ super().__init__(schema_df=schema_df)
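An illustrative sketch (not part of the uploaded file) of how the Capture classes above record operations into a shared ctx and replay them later; the printed variable names (e.g. var_0, var_1) depend on a global counter and are shown only as an example.

from torch.utils.data.datapipes.dataframe.dataframes import CaptureVariable

ctx = {'operations': [], 'variables': [], 'schema_df': None}
inp = CaptureVariable(None, ctx)     # registers itself in ctx['variables']
out = inp + 5                        # records a CaptureVariableAssign in ctx['operations']
print(ctx['operations'][0])          # e.g. "var_1 = var_0 + 5"

inp.calculated_value = 10            # seed the input value ...
ctx['operations'][0].execute()       # ... then replay the recorded operation
print(out.execute())                 # 15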
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py ADDED
@@ -0,0 +1,131 @@
1
+ import random
2
+
3
+ from torch.utils.data.datapipes._decorator import functional_datapipe
4
+ from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
5
+
6
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
7
+
8
+ __all__ = [
9
+ "ConcatDataFramesPipe",
10
+ "DataFramesAsTuplesPipe",
11
+ "ExampleAggregateAsDataFrames",
12
+ "FilterDataFramesPipe",
13
+ "PerRowDataFramesPipe",
14
+ "ShuffleDataFramesPipe",
15
+ ]
16
+
17
+
18
+ @functional_datapipe('_dataframes_as_tuples')
19
+ class DataFramesAsTuplesPipe(IterDataPipe):
20
+ def __init__(self, source_datapipe):
21
+ self.source_datapipe = source_datapipe
22
+
23
+ def __iter__(self):
24
+ for df in self.source_datapipe:
25
+ # for record in df.to_records(index=False):
26
+ yield from df_wrapper.iterate(df)
27
+
28
+
29
+ @functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True)
30
+ class PerRowDataFramesPipe(DFIterDataPipe):
31
+ def __init__(self, source_datapipe):
32
+ self.source_datapipe = source_datapipe
33
+
34
+ def __iter__(self):
35
+ for df in self.source_datapipe:
36
+ # TODO(VitalyFedyunin): Replace with TorchArrow-only API, as we are dropping pandas as a follow-up
37
+ for i in range(len(df)):
38
+ yield df[i:i + 1]
39
+
40
+
41
+ @functional_datapipe('_dataframes_concat', enable_df_api_tracing=True)
42
+ class ConcatDataFramesPipe(DFIterDataPipe):
43
+ def __init__(self, source_datapipe, batch=3):
44
+ self.source_datapipe = source_datapipe
45
+ self.n_batch = batch
46
+
47
+ def __iter__(self):
48
+ buffer = []
49
+ for df in self.source_datapipe:
50
+ buffer.append(df)
51
+ if len(buffer) == self.n_batch:
52
+ yield df_wrapper.concat(buffer)
53
+ buffer = []
54
+ if len(buffer):
55
+ yield df_wrapper.concat(buffer)
56
+
57
+
58
+ @functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True)
59
+ class ShuffleDataFramesPipe(DFIterDataPipe):
60
+ def __init__(self, source_datapipe):
61
+ self.source_datapipe = source_datapipe
62
+
63
+ def __iter__(self):
64
+ size = None
65
+ all_buffer = []
66
+ for df in self.source_datapipe:
67
+ if size is None:
68
+ size = df_wrapper.get_len(df)
69
+ for i in range(df_wrapper.get_len(df)):
70
+ all_buffer.append(df_wrapper.get_item(df, i))
71
+ random.shuffle(all_buffer)
72
+ buffer = []
73
+ for df in all_buffer:
74
+ buffer.append(df)
75
+ if len(buffer) == size:
76
+ yield df_wrapper.concat(buffer)
77
+ buffer = []
78
+ if len(buffer):
79
+ yield df_wrapper.concat(buffer)
80
+
81
+
82
+ @functional_datapipe('_dataframes_filter', enable_df_api_tracing=True)
83
+ class FilterDataFramesPipe(DFIterDataPipe):
84
+ def __init__(self, source_datapipe, filter_fn):
85
+ self.source_datapipe = source_datapipe
86
+ self.filter_fn = filter_fn
87
+
88
+ def __iter__(self):
89
+ size = None
90
+ all_buffer = []
91
+ filter_res = []
92
+ for df in self.source_datapipe:
93
+ if size is None:
94
+ size = len(df.index)
95
+ for i in range(len(df.index)):
96
+ all_buffer.append(df[i:i + 1])
97
+ filter_res.append(self.filter_fn(df.iloc[i]))
98
+
99
+ buffer = []
100
+ for df, res in zip(all_buffer, filter_res):
101
+ if res:
102
+ buffer.append(df)
103
+ if len(buffer) == size:
104
+ yield df_wrapper.concat(buffer)
105
+ buffer = []
106
+ if len(buffer):
107
+ yield df_wrapper.concat(buffer)
108
+
109
+
110
+ @functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True)
111
+ class ExampleAggregateAsDataFrames(DFIterDataPipe):
112
+ def __init__(self, source_datapipe, dataframe_size=10, columns=None):
113
+ self.source_datapipe = source_datapipe
114
+ self.columns = columns
115
+ self.dataframe_size = dataframe_size
116
+
117
+ def _as_list(self, item):
118
+ try:
119
+ return list(item)
120
+ except Exception: # TODO(VitalyFedyunin): Replace with better iterable exception
121
+ return [item]
122
+
123
+ def __iter__(self):
124
+ aggregate = []
125
+ for item in self.source_datapipe:
126
+ aggregate.append(self._as_list(item))
127
+ if len(aggregate) == self.dataframe_size:
128
+ yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
129
+ aggregate = []
130
+ if len(aggregate) > 0:
131
+ yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
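A generic sketch (not part of the uploaded file) of the buffer-then-flush pattern shared by ConcatDataFramesPipe, ShuffleDataFramesPipe, FilterDataFramesPipe and ExampleAggregateAsDataFrames above: accumulate items, yield a full chunk, and flush the partial remainder at the end.

def chunked(source, n):
    buffer = []
    for item in source:
        buffer.append(item)
        if len(buffer) == n:
            yield buffer          # emit a full chunk
            buffer = []
    if buffer:
        yield buffer              # flush the trailing partial chunk

print(list(chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]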
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py ADDED
@@ -0,0 +1,18 @@
1
+ from torch.utils.data.datapipes.datapipe import DataChunk
2
+ from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
3
+
4
+ __all__ = ["DataChunkDF", ]
5
+
6
+
7
+ class DataChunkDF(DataChunk):
8
+ """DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`."""
9
+
10
+ def __iter__(self):
11
+ for df in self.items:
12
+ yield from df_wrapper.iterate(df)
13
+
14
+ def __len__(self):
15
+ total_len = 0
16
+ for df in self.items:
17
+ total_len += df_wrapper.get_len(df)
18
+ return total_len
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py ADDED
@@ -0,0 +1,404 @@
1
+ import functools
2
+ import pickle
3
+ from typing import Dict, Callable, Optional, TypeVar, Generic, Iterator
4
+
5
+ from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
6
+ from torch.utils.data.datapipes._hook_iterator import _SnapshotState
7
+ from torch.utils.data.datapipes.utils.common import (
8
+ _deprecation_warning,
9
+ _iter_deprecated_functional_names,
10
+ _map_deprecated_functional_names,
11
+ )
12
+ from torch.utils.data.dataset import Dataset, IterableDataset
13
+ from torch.utils._import_utils import import_dill
14
+
15
+ dill = import_dill()
16
+ HAS_DILL = dill is not None
17
+
18
+ __all__ = [
19
+ "DataChunk",
20
+ "DFIterDataPipe",
21
+ "IterDataPipe",
22
+ "MapDataPipe",
23
+ ]
24
+
25
+ T = TypeVar('T')
26
+ T_co = TypeVar('T_co', covariant=True)
27
+
28
+ UNTRACABLE_DATAFRAME_PIPES = ['batch', # As it returns DataChunks
29
+ 'groupby', # As it returns DataChunks
30
+ '_dataframes_as_tuples', # As it unpacks DF
31
+ 'trace_as_dataframe', # As it used to mark DF for tracing
32
+ ]
33
+
34
+
35
+ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta):
36
+ r"""
37
+ Iterable-style DataPipe.
38
+
39
+ All DataPipes that represent an iterable of data samples should subclass this.
40
+ This style of DataPipes is particularly useful when data come from a stream, or
41
+ when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its
42
+ elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``.
43
+
44
+ All subclasses should overwrite :meth:`__iter__`, which would return an
45
+ iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its
46
+ method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should
47
+ override ``reset()`` if necessary. The common usages include resetting buffers, pointers,
48
+ and various state variables within the custom ``IterDataPipe``.
49
+
50
+ Note:
51
+ Only `one` iterator can be valid for each ``IterDataPipe`` at a time,
52
+ and the creation of a second iterator will invalidate the first one. This constraint is necessary because
53
+ some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators.
54
+ The code example below presents details on how this constraint looks in practice.
55
+ If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_.
56
+
57
+ These DataPipes can be invoked in two ways, using the class constructor or applying their
58
+ functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes).
59
+ You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple
60
+ operations in succession.
61
+
62
+ .. _GitHub IterDataPipe Single Iterator Issue:
63
+ https://github.com/pytorch/data/issues/45
64
+
65
+ Note:
66
+ When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
67
+ item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader`
68
+ iterator. When :attr:`num_workers > 0`, each worker process will have a
69
+ different copy of the DataPipe object, so it is often desired to configure
70
+ each copy independently to avoid having duplicate data returned from the
71
+ workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
72
+ process, returns information about the worker. It can be used in either the
73
+ dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
74
+ :attr:`worker_init_fn` option to modify each copy's behavior.
75
+
76
+ Examples:
77
+ General Usage:
78
+ >>> # xdoctest: +SKIP
79
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
80
+ >>> dp = IterableWrapper(range(10))
81
+ >>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor
82
+ >>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended)
83
+ >>> list(map_dp_1)
84
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
85
+ >>> list(map_dp_2)
86
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
87
+ >>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0)
88
+ >>> list(filter_dp)
89
+ [2, 4, 6, 8, 10]
90
+ Single Iterator Constraint Example:
91
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
92
+ >>> source_dp = IterableWrapper(range(10))
93
+ >>> it1 = iter(source_dp)
94
+ >>> list(it1)
95
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
96
+ >>> it1 = iter(source_dp)
97
+ >>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1`
98
+ >>> next(it2)
99
+ 0
100
+ >>> next(it1) # Further usage of `it1` will raise a `RuntimeError`
101
+ """
102
+
103
+ functions: Dict[str, Callable] = {}
104
+ reduce_ex_hook: Optional[Callable] = None
105
+ getstate_hook: Optional[Callable] = None
106
+ str_hook: Optional[Callable] = None
107
+ repr_hook: Optional[Callable] = None
108
+ _valid_iterator_id: Optional[int] = None
109
+ _number_of_samples_yielded: int = 0
110
+ _snapshot_state: _SnapshotState = _SnapshotState.NotStarted
111
+ _fast_forward_iterator: Optional[Iterator] = None
112
+
113
+ def __iter__(self) -> Iterator[T_co]:
114
+ return self
115
+
116
+ def __getattr__(self, attribute_name):
117
+ if attribute_name in IterDataPipe.functions:
118
+ if attribute_name in _iter_deprecated_functional_names:
119
+ kwargs = _iter_deprecated_functional_names[attribute_name]
120
+ _deprecation_warning(**kwargs)
121
+ f = IterDataPipe.functions[attribute_name]
122
+ function = functools.partial(f, self)
123
+ functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
124
+ return function
125
+ else:
126
+ raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}")
127
+
128
+ @classmethod
129
+ def register_function(cls, function_name, function):
130
+ cls.functions[function_name] = function
131
+
132
+ @classmethod
133
+ def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False):
134
+ if function_name in cls.functions:
135
+ raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken")
136
+
137
+ def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs):
138
+ result_pipe = cls(source_dp, *args, **kwargs)
139
+ if isinstance(result_pipe, IterDataPipe):
140
+ if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe):
141
+ if function_name not in UNTRACABLE_DATAFRAME_PIPES:
142
+ result_pipe = result_pipe.trace_as_dataframe()
143
+
144
+ return result_pipe
145
+
146
+ function = functools.partial(
147
+ class_function, cls_to_register, enable_df_api_tracing
148
+ )
149
+ functools.update_wrapper(
150
+ wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
151
+ )
152
+ cls.functions[function_name] = function
153
+
154
+ def __getstate__(self):
155
+ """
156
+ Serialize `lambda` functions when `dill` is available.
157
+
158
+ If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
159
+ `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
160
+ """
161
+ state = self.__dict__
162
+ if IterDataPipe.getstate_hook is not None:
163
+ return IterDataPipe.getstate_hook(state)
164
+ return state
165
+
166
+ def __reduce_ex__(self, *args, **kwargs):
167
+ if IterDataPipe.reduce_ex_hook is not None:
168
+ try:
169
+ return IterDataPipe.reduce_ex_hook(self)
170
+ except NotImplementedError:
171
+ pass
172
+ return super().__reduce_ex__(*args, **kwargs)
173
+
174
+ @classmethod
175
+ def set_getstate_hook(cls, hook_fn):
176
+ if IterDataPipe.getstate_hook is not None and hook_fn is not None:
177
+ raise Exception("Attempt to override existing getstate_hook")
178
+ IterDataPipe.getstate_hook = hook_fn
179
+
180
+ @classmethod
181
+ def set_reduce_ex_hook(cls, hook_fn):
182
+ if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None:
183
+ raise Exception("Attempt to override existing reduce_ex_hook")
184
+ IterDataPipe.reduce_ex_hook = hook_fn
185
+
186
+ def __repr__(self):
187
+ if self.repr_hook is not None:
188
+ return self.repr_hook(self)
189
+ # Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
190
+ return str(self.__class__.__qualname__)
191
+
192
+ def __str__(self):
193
+ if self.str_hook is not None:
194
+ return self.str_hook(self)
195
+ # Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
196
+ return str(self.__class__.__qualname__)
197
+
198
+ def __dir__(self):
199
+ # for auto-completion in a REPL (e.g. Jupyter notebook)
200
+ return list(super().__dir__()) + list(self.functions.keys())
201
+
202
+ def reset(self) -> None:
203
+ r"""
204
+ Reset the `IterDataPipe` to the initial state.
205
+
206
+ By default, no-op. For subclasses of `IterDataPipe`, depending on their functionalities,
207
+ they may want to override this method with implementations that
208
+ may clear the buffers and reset pointers of the DataPipe.
209
+ The `reset` method is always called when `__iter__` is called as part of `hook_iterator`.
210
+ """
211
+ pass
212
+
213
+
214
+ class DFIterDataPipe(IterDataPipe):
215
+ def _is_dfpipe(self):
216
+ return True
217
+
218
+
219
+ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta):
220
+ r"""
221
+ Map-style DataPipe.
222
+
223
+ All datasets that represent a map from keys to data samples should subclass this.
224
+ Subclasses should overwrite :meth:`__getitem__`, supporting fetching a
225
+ data sample for a given, unique key. Subclasses can also optionally overwrite
226
+ :meth:`__len__`, which is expected to return the size of the dataset by many
227
+ :class:`~torch.utils.data.Sampler` implementations and the default options
228
+ of :class:`~torch.utils.data.DataLoader`.
229
+
230
+ These DataPipes can be invoked in two ways, using the class constructor or applying their
231
+ functional form onto an existing `MapDataPipe` (recommended, available to most but not all DataPipes).
232
+
233
+ Note:
234
+ :class:`~torch.utils.data.DataLoader` by default constructs an index
235
+ sampler that yields integral indices. To make it work with a map-style
236
+ DataPipe with non-integral indices/keys, a custom sampler must be provided.
237
+
238
+ Example:
239
+ >>> # xdoctest: +SKIP
240
+ >>> from torchdata.datapipes.map import SequenceWrapper, Mapper
241
+ >>> dp = SequenceWrapper(range(10))
242
+ >>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended)
243
+ >>> list(map_dp_1)
244
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
245
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor
246
+ >>> list(map_dp_2)
247
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
248
+ >>> batch_dp = map_dp_1.batch(batch_size=2)
249
+ >>> list(batch_dp)
250
+ [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
251
+ """
252
+
253
+ functions: Dict[str, Callable] = {}
254
+ reduce_ex_hook: Optional[Callable] = None
255
+ getstate_hook: Optional[Callable] = None
256
+ str_hook: Optional[Callable] = None
257
+ repr_hook: Optional[Callable] = None
258
+
259
+ def __getattr__(self, attribute_name):
260
+ if attribute_name in MapDataPipe.functions:
261
+ if attribute_name in _map_deprecated_functional_names:
262
+ kwargs = _map_deprecated_functional_names[attribute_name]
263
+ _deprecation_warning(**kwargs)
264
+ f = MapDataPipe.functions[attribute_name]
265
+ function = functools.partial(f, self)
266
+ functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
267
+ return function
268
+ else:
269
+ raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}")
270
+
271
+ @classmethod
272
+ def register_function(cls, function_name, function):
273
+ cls.functions[function_name] = function
274
+
275
+ @classmethod
276
+ def register_datapipe_as_function(cls, function_name, cls_to_register):
277
+ if function_name in cls.functions:
278
+ raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken")
279
+
280
+ def class_function(cls, source_dp, *args, **kwargs):
281
+ result_pipe = cls(source_dp, *args, **kwargs)
282
+ return result_pipe
283
+
284
+ function = functools.partial(class_function, cls_to_register)
285
+ functools.update_wrapper(
286
+ wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
287
+ )
288
+ cls.functions[function_name] = function
289
+
290
+ def __getstate__(self):
291
+ """
292
+ Serialize `lambda` functions when `dill` is available.
293
+
294
+ If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
295
+ `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
296
+ """
297
+ state = self.__dict__
298
+ if MapDataPipe.getstate_hook is not None:
299
+ return MapDataPipe.getstate_hook(state)
300
+ return state
301
+
302
+ def __reduce_ex__(self, *args, **kwargs):
303
+ if MapDataPipe.reduce_ex_hook is not None:
304
+ try:
305
+ return MapDataPipe.reduce_ex_hook(self)
306
+ except NotImplementedError:
307
+ pass
308
+ return super().__reduce_ex__(*args, **kwargs)
309
+
310
+ @classmethod
311
+ def set_getstate_hook(cls, hook_fn):
312
+ if MapDataPipe.getstate_hook is not None and hook_fn is not None:
313
+ raise Exception("Attempt to override existing getstate_hook")
314
+ MapDataPipe.getstate_hook = hook_fn
315
+
316
+ @classmethod
317
+ def set_reduce_ex_hook(cls, hook_fn):
318
+ if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None:
319
+ raise Exception("Attempt to override existing reduce_ex_hook")
320
+ MapDataPipe.reduce_ex_hook = hook_fn
321
+
322
+ def __repr__(self):
323
+ if self.repr_hook is not None:
324
+ return self.repr_hook(self)
325
+ # Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
326
+ return str(self.__class__.__qualname__)
327
+
328
+ def __str__(self):
329
+ if self.str_hook is not None:
330
+ return self.str_hook(self)
331
+ # Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
332
+ return str(self.__class__.__qualname__)
333
+
334
+ def __dir__(self):
335
+ # for auto-completion in a REPL (e.g. Jupyter notebook)
336
+ return list(super().__dir__()) + list(self.functions.keys())
337
+
338
+
339
+
340
+ class _DataPipeSerializationWrapper:
341
+ def __init__(self, datapipe):
342
+ self._datapipe = datapipe
343
+
344
+ def __getstate__(self):
345
+ use_dill = False
346
+ try:
347
+ value = pickle.dumps(self._datapipe)
348
+ except Exception:
349
+ if HAS_DILL:
350
+ value = dill.dumps(self._datapipe)
351
+ use_dill = True
352
+ else:
353
+ raise
354
+ return (value, use_dill)
355
+
356
+ def __setstate__(self, state):
357
+ value, use_dill = state
358
+ if use_dill:
359
+ self._datapipe = dill.loads(value)
360
+ else:
361
+ self._datapipe = pickle.loads(value)
362
+
363
+ def __len__(self):
364
+ try:
365
+ return len(self._datapipe)
366
+ except Exception as e:
367
+ raise TypeError(
368
+ f"{type(self).__name__} instance doesn't have valid length"
369
+ ) from e
370
+
371
+
372
+ class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
373
+ def __init__(self, datapipe: IterDataPipe[T_co]):
374
+ super().__init__(datapipe)
375
+ self._datapipe_iter: Optional[Iterator[T_co]] = None
376
+
377
+ def __iter__(self) -> "_IterDataPipeSerializationWrapper":
378
+ self._datapipe_iter = iter(self._datapipe)
379
+ return self
380
+
381
+ def __next__(self) -> T_co: # type: ignore[type-var]
382
+ assert self._datapipe_iter is not None
383
+ return next(self._datapipe_iter)
384
+
385
+
386
+ class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
387
+ def __getitem__(self, idx):
388
+ return self._datapipe[idx]
389
+
390
+
391
+ class DataChunk(list, Generic[T]):
392
+ def __init__(self, items):
393
+ super().__init__(items)
394
+ self.items = items
395
+
396
+ def as_str(self, indent=''):
397
+ res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
398
+ return res
399
+
400
+ def __iter__(self) -> Iterator[T]:
401
+ yield from super().__iter__()
402
+
403
+ def raw_iterator(self) -> T: # type: ignore[misc]
404
+ yield from self.items
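A minimal sketch (not part of the uploaded file) of a custom IterDataPipe as described in the docstrings above: subclass, implement __iter__, and optionally override reset() to clear per-iteration state; the class name RangeDataPipe is made up for illustration.

from torch.utils.data.datapipes.datapipe import IterDataPipe

class RangeDataPipe(IterDataPipe):
    def __init__(self, n):
        self.n = n
        self._pos = 0                 # per-iteration state

    def reset(self):
        self._pos = 0                 # called automatically when __iter__ is invoked

    def __iter__(self):
        while self._pos < self.n:
            yield self._pos
            self._pos += 1

dp = RangeDataPipe(3)
print(list(dp))   # [0, 1, 2]
print(list(dp))   # reset() restores the state, so iteration starts over: [0, 1, 2]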
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi ADDED
@@ -0,0 +1,689 @@
1
+ # This base template ("datapipe.pyi.in") is generated from mypy stubgen with minimal editing for code injection
2
+ # The output file will be "datapipe.pyi". This is executed as part of torch/CMakeLists.txt
3
+ # Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other
4
+ # classes/objects here, even though we are not injecting extra code into them at the moment.
5
+
6
+ from typing import Any, Callable, Dict, Generic, Iterator, List, Literal, Optional, TypeVar, Union
7
+
8
+ from torch.utils.data import Dataset, default_collate, IterableDataset
9
+ from torch.utils.data.datapipes._hook_iterator import _SnapshotState
10
+ from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
11
+
12
+ T_co = TypeVar("T_co", covariant=True)
13
+ T = TypeVar("T")
14
+ UNTRACABLE_DATAFRAME_PIPES: Any
15
+
16
+ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta):
17
+ functions: Dict[str, Callable] = ...
18
+ reduce_ex_hook: Optional[Callable] = ...
19
+ getstate_hook: Optional[Callable] = ...
20
+ str_hook: Optional[Callable] = ...
21
+ repr_hook: Optional[Callable] = ...
22
+ def __getattr__(self, attribute_name: Any): ...
23
+ @classmethod
24
+ def register_function(cls, function_name: Any, function: Any) -> None: ...
25
+ @classmethod
26
+ def register_datapipe_as_function(
27
+ cls,
28
+ function_name: Any,
29
+ cls_to_register: Any,
30
+ ): ...
31
+ def __getstate__(self): ...
32
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ...
33
+ @classmethod
34
+ def set_getstate_hook(cls, hook_fn: Any) -> None: ...
35
+ @classmethod
36
+ def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ...
37
+ # Functional form of 'BatcherMapDataPipe'
38
+ def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> MapDataPipe:
39
+ r"""
40
+ Create mini-batches of data (functional name: ``batch``).
41
+
42
+ An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``,
43
+ or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``.
44
+
45
+ Args:
46
+ datapipe: Iterable DataPipe being batched
47
+ batch_size: The size of each batch
48
+ drop_last: Option to drop the last batch if it's not full
49
+
50
+ Example:
51
+ >>> # xdoctest: +SKIP
52
+ >>> from torchdata.datapipes.map import SequenceWrapper
53
+ >>> dp = SequenceWrapper(range(10))
54
+ >>> batch_dp = dp.batch(batch_size=2)
55
+ >>> list(batch_dp)
56
+ [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
57
+ """
58
+
59
+ # Functional form of 'ConcaterMapDataPipe'
60
+ def concat(self, *datapipes: MapDataPipe) -> MapDataPipe:
61
+ r"""
62
+ Concatenate multiple Map DataPipes (functional name: ``concat``).
63
+
64
+ The new index ranges over the cumulative lengths of the source DataPipes.
65
+ For example, if there are 2 source DataPipes both with length 5,
66
+ index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to
67
+ elements of the first DataPipe, and 5 to 9 would refer to elements
68
+ of the second DataPipe.
69
+
70
+ Args:
71
+ datapipes: Map DataPipes being concatenated
72
+
73
+ Example:
74
+ >>> # xdoctest: +SKIP
75
+ >>> from torchdata.datapipes.map import SequenceWrapper
76
+ >>> dp1 = SequenceWrapper(range(3))
77
+ >>> dp2 = SequenceWrapper(range(3))
78
+ >>> concat_dp = dp1.concat(dp2)
79
+ >>> list(concat_dp)
80
+ [0, 1, 2, 0, 1, 2]
81
+ """
82
+
83
+ # Functional form of 'MapperMapDataPipe'
84
+ def map(self, fn: Callable= ...) -> MapDataPipe:
85
+ r"""
86
+ Apply the input function over each item from the source DataPipe (functional name: ``map``).
87
+
88
+ The function can be any regular Python function or partial object. Lambda
89
+ function is not recommended as it is not supported by pickle.
90
+
91
+ Args:
92
+ datapipe: Source MapDataPipe
93
+ fn: Function being applied to each item
94
+
95
+ Example:
96
+ >>> # xdoctest: +SKIP
97
+ >>> from torchdata.datapipes.map import SequenceWrapper, Mapper
98
+ >>> def add_one(x):
99
+ ... return x + 1
100
+ >>> dp = SequenceWrapper(range(10))
101
+ >>> map_dp_1 = dp.map(add_one)
102
+ >>> list(map_dp_1)
103
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
104
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
105
+ >>> list(map_dp_2)
106
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
107
+ """
108
+
109
+ # Functional form of 'ShufflerIterDataPipe'
110
+ def shuffle(self, *, indices: Optional[List] = None) -> IterDataPipe:
111
+ r"""
112
+ Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``).
113
+
114
+ When it is used with :class:`~torch.utils.data.DataLoader`, the methods to
115
+ set up random seed are different based on :attr:`num_workers`.
116
+
117
+ For single-process mode (:attr:`num_workers == 0`), the random seed is set before
118
+ the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
119
+ mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed
120
+ for each worker process.
121
+
122
+ Args:
123
+ datapipe: MapDataPipe being shuffled
124
+ indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing
125
+
126
+ Example:
127
+ >>> # xdoctest: +SKIP
128
+ >>> from torchdata.datapipes.map import SequenceWrapper
129
+ >>> dp = SequenceWrapper(range(10))
130
+ >>> shuffle_dp = dp.shuffle().set_seed(0)
131
+ >>> list(shuffle_dp)
132
+ [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
133
+ >>> list(shuffle_dp)
134
+ [6, 1, 9, 5, 2, 4, 7, 3, 8, 0]
135
+ >>> # Reset seed for Shuffler
136
+ >>> shuffle_dp = shuffle_dp.set_seed(0)
137
+ >>> list(shuffle_dp)
138
+ [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
139
+
140
+ Note:
141
+ Even though this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it returns an
142
+ ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be insensitive to
143
+ the order of its data for the sake of random reads, but ``IterDataPipe`` depends on the order
144
+ of data during data processing.
145
+ """
146
+
147
+ # Functional form of 'ZipperMapDataPipe'
148
+ def zip(self, *datapipes: MapDataPipe[T_co]) -> MapDataPipe:
149
+ r"""
150
+ Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
151
+
152
+ This MapDataPipe is exhausted as soon as the shortest input DataPipe is exhausted.
153
+
154
+ Args:
155
+ *datapipes: Map DataPipes being aggregated
156
+
157
+ Example:
158
+ >>> # xdoctest: +SKIP
159
+ >>> from torchdata.datapipes.map import SequenceWrapper
160
+ >>> dp1 = SequenceWrapper(range(3))
161
+ >>> dp2 = SequenceWrapper(range(10, 13))
162
+ >>> zip_dp = dp1.zip(dp2)
163
+ >>> list(zip_dp)
164
+ [(0, 10), (1, 11), (2, 12)]
165
+ """
166
+
167
+
168
+ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta):
169
+ functions: Dict[str, Callable] = ...
170
+ reduce_ex_hook: Optional[Callable] = ...
171
+ getstate_hook: Optional[Callable] = ...
172
+ str_hook: Optional[Callable] = ...
173
+ repr_hook: Optional[Callable] = ...
174
+ _number_of_samples_yielded: int = ...
175
+ _snapshot_state: _SnapshotState = _SnapshotState.Iterating
176
+ _fast_forward_iterator: Optional[Iterator] = ...
177
+ def __getattr__(self, attribute_name: Any): ...
178
+ @classmethod
179
+ def register_function(cls, function_name: Any, function: Any) -> None: ...
180
+ @classmethod
181
+ def register_datapipe_as_function(
182
+ cls,
183
+ function_name: Any,
184
+ cls_to_register: Any,
185
+ enable_df_api_tracing: bool = ...,
186
+ ): ...
187
+ def __getstate__(self): ...
188
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ...
189
+ @classmethod
190
+ def set_getstate_hook(cls, hook_fn: Any) -> None: ...
191
+ @classmethod
192
+ def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ...
193
+ # Functional form of 'BatcherIterDataPipe'
194
+ def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> IterDataPipe:
195
+ r"""
196
+ Creates mini-batches of data (functional name: ``batch``).
197
+
198
+ An outer dimension of size ``batch_size`` is added to each yielded batch; if ``drop_last`` is set to ``False``, the
199
+ last batch may instead have size ``length % batch_size``.
200
+
201
+ Args:
202
+ datapipe: Iterable DataPipe being batched
203
+ batch_size: The size of each batch
204
+ drop_last: Option to drop the last batch if it's not full
205
+ wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
206
+ defaults to ``DataChunk``
207
+
208
+ Example:
209
+ >>> # xdoctest: +SKIP
210
+ >>> from torchdata.datapipes.iter import IterableWrapper
211
+ >>> dp = IterableWrapper(range(10))
212
+ >>> dp = dp.batch(batch_size=3, drop_last=True)
213
+ >>> list(dp)
214
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
215
+ """
216
+
217
+ # Functional form of 'CollatorIterDataPipe'
218
+ def collate(self, conversion: Optional[Union[Callable[..., Any],Dict[Union[str, Any], Union[Callable, Any]],]] = default_collate, collate_fn: Optional[Callable] = None) -> IterDataPipe:
219
+ r"""
220
+ Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
221
+
222
+ By default, it uses :func:`torch.utils.data.default_collate`.
223
+
224
+ .. note::
225
+ While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
226
+ default behavior and `functools.partial` to specify any additional arguments.
227
+
228
+ Args:
229
+ datapipe: Iterable DataPipe being collated
230
+ collate_fn: Customized collate function to collect and combine data or a batch of data.
231
+ Default function collates to Tensor(s) based on data type.
232
+
233
+ Example:
234
+ >>> # xdoctest: +SKIP
235
+ >>> # Convert integer data to float Tensor
236
+ >>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
237
+ ... def __init__(self, start, end):
238
+ ... super().__init__()
239
+ ... assert end > start, "this example code only works with end > start"
240
+ ... self.start = start
241
+ ... self.end = end
242
+ ...
243
+ ... def __iter__(self):
244
+ ... return iter(range(self.start, self.end))
245
+ ...
246
+ ... def __len__(self):
247
+ ... return self.end - self.start
248
+ ...
249
+ >>> ds = MyIterDataPipe(start=3, end=7)
250
+ >>> print(list(ds))
251
+ [3, 4, 5, 6]
252
+ >>> def collate_fn(batch):
253
+ ... return torch.tensor(batch, dtype=torch.float)
254
+ ...
255
+ >>> collated_ds = ds.collate(collate_fn=collate_fn)
256
+ >>> print(list(collated_ds))
257
+ [tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
258
+ """
259
+
260
+ # Functional form of 'ConcaterIterDataPipe'
261
+ def concat(self, *datapipes: IterDataPipe) -> IterDataPipe:
262
+ r"""
263
+ Concatenates multiple Iterable DataPipes (functional name: ``concat``).
264
+
265
+ The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones.
266
+
267
+ Args:
268
+ datapipes: Iterable DataPipes being concatenated
269
+
270
+ Example:
271
+ >>> # xdoctest: +REQUIRES(module:torchdata)
272
+ >>> import random
273
+ >>> from torchdata.datapipes.iter import IterableWrapper
274
+ >>> dp1 = IterableWrapper(range(3))
275
+ >>> dp2 = IterableWrapper(range(5))
276
+ >>> list(dp1.concat(dp2))
277
+ [0, 1, 2, 0, 1, 2, 3, 4]
278
+ """
279
+
280
+ # Functional form of 'DemultiplexerIterDataPipe'
281
+ def demux(self, num_instances: int, classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000) -> List[IterDataPipe]:
282
+ r"""
283
+ Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``).
284
+
285
+ A list of the child DataPipes is returned from this operation.
286
+
287
+ Args:
288
+ datapipe: Iterable DataPipe being filtered
289
+ num_instances: number of instances of the DataPipe to create
290
+ classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None``
291
+ drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None``
292
+ buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
293
+ DataPipes while waiting for their values to be yielded.
294
+ Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
295
+
296
+ Examples:
297
+ >>> # xdoctest: +REQUIRES(module:torchdata)
298
+ >>> from torchdata.datapipes.iter import IterableWrapper
299
+ >>> def odd_or_even(n):
300
+ ... return n % 2
301
+ >>> source_dp = IterableWrapper(range(5))
302
+ >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
303
+ >>> list(dp1)
304
+ [0, 2, 4]
305
+ >>> list(dp2)
306
+ [1, 3]
307
+ >>> # It can also filter out any element that gets `None` from the `classifier_fn`
308
+ >>> def odd_or_even_no_zero(n):
309
+ ... return n % 2 if n != 0 else None
310
+ >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
311
+ >>> list(dp1)
312
+ [2, 4]
313
+ >>> list(dp2)
314
+ [1, 3]
315
+ """
316
+
317
+ # Functional form of 'FilterIterDataPipe'
318
+ def filter(self, filter_fn: Callable, input_col=None) -> IterDataPipe:
319
+ r"""
320
+ Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
321
+
322
+ Args:
323
+ datapipe: Iterable DataPipe being filtered
324
+ filter_fn: Customized function mapping an element to a boolean.
325
+ input_col: Index or indices of data which ``filter_fn`` is applied, such as:
326
+
327
+ - ``None`` as default to apply ``filter_fn`` to the data directly.
328
+ - Integer(s) are used for list/tuple.
329
+ - Key(s) are used for dict.
330
+
331
+ Example:
332
+ >>> # xdoctest: +SKIP
333
+ >>> from torchdata.datapipes.iter import IterableWrapper
334
+ >>> def is_even(n):
335
+ ... return n % 2 == 0
336
+ >>> dp = IterableWrapper(range(5))
337
+ >>> filter_dp = dp.filter(filter_fn=is_even)
338
+ >>> list(filter_dp)
339
+ [0, 2, 4]
340
+ """
341
+
342
+ # Functional form of 'ForkerIterDataPipe'
343
+ def fork(self, num_instances: int, buffer_size: int = 1000, copy: Optional[Literal["shallow", "deep"]] = None) -> List[IterDataPipe]:
344
+ r"""
345
+ Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``).
346
+
347
+ Args:
348
+ datapipe: Iterable DataPipe being copied
349
+ num_instances: number of instances of the datapipe to create
350
+ buffer_size: this restricts how far ahead the leading child DataPipe
351
+ can read relative to the slowest child DataPipe.
352
+ Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
353
+ copy: copy strategy to use for items yielded by each branch. Supported
354
+ options are ``None`` for no copying, ``"shallow"`` for shallow object
355
+ copies, and ``"deep"`` for deep object copies. Defaults to ``None``.
356
+
357
+ Note:
358
+ All branches of the forked pipeline return the identical object unless
359
+ the copy parameter is supplied. If the object is mutable or contains
360
+ mutable objects, changing them in one branch will affect all others.
361
+
362
+ Example:
363
+ >>> # xdoctest: +REQUIRES(module:torchdata)
364
+ >>> from torchdata.datapipes.iter import IterableWrapper
365
+ >>> source_dp = IterableWrapper(range(5))
366
+ >>> dp1, dp2 = source_dp.fork(num_instances=2)
367
+ >>> list(dp1)
368
+ [0, 1, 2, 3, 4]
369
+ >>> list(dp2)
370
+ [0, 1, 2, 3, 4]
371
+ """
372
+
373
+ # Functional form of 'GrouperIterDataPipe'
374
+ def groupby(self, group_key_fn: Callable[[T_co], Any], *, keep_key: bool = False, buffer_size: int = 10000, group_size: Optional[int] = None, guaranteed_group_size: Optional[int] = None, drop_remaining: bool = False) -> IterDataPipe:
375
+ r"""
376
+ Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``.
377
+
378
+ (functional name: ``groupby``).
379
+
380
+ The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group
381
+ will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full,
382
+ the DataPipe will yield the largest batch with the same key, provided that its size is larger
383
+ than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``.
384
+
385
+ After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity
386
+ will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``.
387
+
388
+ Args:
389
+ datapipe: Iterable datapipe to be grouped
390
+ group_key_fn: Function used to generate group key from the data of the source datapipe
391
+ keep_key: Option to yield the matching key along with the items in a tuple,
392
+ resulting in ``(key, [items])``, otherwise returning ``[items]``
393
+ buffer_size: The size of buffer for ungrouped data
394
+ group_size: The max size of each group, a batch is yielded as soon as it reaches this size
395
+ guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full
396
+ drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer
397
+ when the buffer is full
398
+
399
+ Example:
400
+ >>> import os
401
+ >>> # xdoctest: +SKIP
402
+ >>> from torchdata.datapipes.iter import IterableWrapper
403
+ >>> def group_fn(file):
404
+ ... return os.path.basename(file).split(".")[0]
405
+ >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
406
+ >>> dp0 = source_dp.groupby(group_key_fn=group_fn)
407
+ >>> list(dp0)
408
+ [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']]
409
+ >>> # A group is yielded as soon as its size equals to `group_size`
410
+ >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2)
411
+ >>> list(dp1)
412
+ [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
413
+ >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size`
414
+ >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2)
415
+ >>> list(dp2)
416
+ [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
417
+ """
418
+
419
+ # Functional form of 'FileListerIterDataPipe'
420
+ def list_files(self, masks: Union[str, List[str]] = '', *, recursive: bool = False, abspath: bool = False, non_deterministic: bool = False, length: int = -1) -> IterDataPipe:
421
+ r"""
422
+ Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
423
+
424
+ Multiple root directories can be provided (functional name: ``list_files``).
425
+
426
+ Args:
427
+ root: Root directory or a sequence of root directories
428
+ masks: Unix style filter string or string list for filtering file name(s)
429
+ recursive: Whether to return pathname from nested directories or not
430
+ abspath: Whether to return relative pathname or absolute pathname
431
+ non_deterministic: Whether to return pathname in sorted order or not.
432
+ If ``False``, the results yielded from each root directory will be sorted
433
+ length: Nominal length of the datapipe
434
+
435
+ Example:
436
+ >>> # xdoctest: +SKIP
437
+ >>> from torchdata.datapipes.iter import FileLister
438
+ >>> dp = FileLister(root=".", recursive=True)
439
+ >>> list(dp)
440
+ ['example.py', './data/data.tar']
441
+ """
442
+
443
+ # Functional form of 'MapperIterDataPipe'
444
+ def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe:
445
+ r"""
446
+ Applies a function over each item from the source DataPipe (functional name: ``map``).
447
+
448
+ The function can be any regular Python function or partial object. Lambda
449
+ functions are not recommended as they are not supported by pickle.
450
+
451
+ Args:
452
+ datapipe: Source Iterable DataPipe
453
+ fn: Function being applied over each item
454
+ input_col: Index or indices of data which ``fn`` is applied, such as:
455
+
456
+ - ``None`` as default to apply ``fn`` to the data directly.
457
+ - Integer(s) are used for list/tuple.
458
+ - Key(s) are used for dict.
459
+
460
+ output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified
461
+ only when ``input_col`` is not ``None``
462
+
463
+ - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with
464
+ multiple indices, the left-most one is used, and other indices will be removed.
465
+ - Integer is used for list/tuple. ``-1`` represents to append result at the end.
466
+ - Key is used for dict. New key is acceptable.
467
+
468
+ Example:
469
+ >>> # xdoctest: +SKIP
470
+ >>> from torchdata.datapipes.iter import IterableWrapper, Mapper
471
+ >>> def add_one(x):
472
+ ... return x + 1
473
+ >>> dp = IterableWrapper(range(10))
474
+ >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred
475
+ >>> list(map_dp_1)
476
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
477
+ >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle`
478
+ >>> # Use `functools.partial` or explicitly define the function instead
479
+ >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
480
+ >>> list(map_dp_2)
481
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
482
+ """
483
+
484
+ # Functional form of 'MultiplexerIterDataPipe'
485
+ def mux(self, *datapipes) -> IterDataPipe:
486
+ r"""
487
+ Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``).
488
+
489
+ As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
490
+ and so on. It ends when the shortest input DataPipe is exhausted.
491
+
492
+ Args:
493
+ datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted
494
+
495
+ Example:
496
+ >>> # xdoctest: +REQUIRES(module:torchdata)
497
+ >>> from torchdata.datapipes.iter import IterableWrapper
498
+ >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
499
+ >>> list(dp1.mux(dp2, dp3))
500
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
501
+ """
502
+
503
+ # Functional form of 'FileOpenerIterDataPipe'
504
+ def open_files(self, mode: str = 'r', encoding: Optional[str] = None, length: int = -1) -> IterDataPipe:
505
+ r"""
506
+ Given pathnames, opens files and yields the pathname and file stream as a tuple (functional name: ``open_files``).
507
+
508
+ Args:
509
+ datapipe: Iterable datapipe that provides pathnames
510
+ mode: An optional string that specifies the mode in which
511
+ the file is opened by ``open()``. It defaults to ``r``, other options are
512
+ ``b`` for reading in binary mode and ``t`` for text mode.
513
+ encoding: An optional string that specifies the encoding of the
514
+ underlying file. It defaults to ``None`` to match the default encoding of ``open``.
515
+ length: Nominal length of the datapipe
516
+
517
+ Note:
518
+ The opened file handles will be closed by Python's GC periodically. Users can choose
519
+ to close them explicitly.
520
+
521
+ Example:
522
+ >>> # xdoctest: +SKIP
523
+ >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
524
+ >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
525
+ >>> dp = FileOpener(dp)
526
+ >>> dp = StreamReader(dp)
527
+ >>> list(dp)
528
+ [('./abc.txt', 'abc')]
529
+ """
530
+
531
+ # Functional form of 'StreamReaderIterDataPipe'
532
+ def read_from_stream(self, chunk=None) -> IterDataPipe:
533
+ r"""
534
+ Given IO streams and their label names, yields bytes with the label name in a tuple.
535
+
536
+ (functional name: ``read_from_stream``).
537
+
538
+ Args:
539
+ datapipe: Iterable DataPipe provides label/URL and byte stream
540
+ chunk: Number of bytes to be read from stream per iteration.
541
+ If ``None``, all bytes will be read until the EOF.
542
+
543
+ Example:
544
+ >>> # xdoctest: +SKIP
545
+ >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader
546
+ >>> from io import StringIO
547
+ >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))])
548
+ >>> list(StreamReader(dp, chunk=1))
549
+ [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]
550
+ """
551
+
552
+ # Functional form of 'RoutedDecoderIterDataPipe'
553
+ def routed_decode(self, *handlers: Callable, key_fn: Callable= ...) -> IterDataPipe:
554
+ r"""
555
+ Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple.
556
+
557
+ (functional name: ``routed_decode``)
558
+
559
+ Args:
560
+ datapipe: Iterable datapipe that provides pathname and binary stream in tuples
561
+ handlers: Optional user-defined decoder handlers. If ``None``, basic and image decoder
562
+ handlers will be set as default. If multiple handlers are provided, the priority
563
+ order follows the order of handlers (the first handler has the highest priority)
564
+ key_fn: Function for decoder to extract key from pathname to dispatch handlers.
565
+ Default is set to extract file extension from pathname
566
+
567
+ Note:
568
+ When ``key_fn`` is specified and returns anything other than the file extension, the default
569
+ handlers will not work and users need to specify a custom handler. A custom handler
570
+ could use a regex to determine whether it is eligible to handle the data.
571
+ """
572
+
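A minimal usage sketch for ``routed_decode`` (not part of the stub above; the file layout and decoded
values are hypothetical, and the default extension-based handlers are assumed):

    >>> # xdoctest: +SKIP
    >>> from torchdata.datapipes.iter import FileLister, FileOpener
    >>> dp = FileOpener(FileLister(root="."), mode="b")  # yields (pathname, binary stream) tuples
    >>> decoded_dp = dp.routed_decode()                  # default handlers dispatch on the file extension
    >>> # e.g. yields ('./labels.json', <parsed JSON>) and ('./img.png', <decoded image>)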
573
+ # Functional form of 'ShardingFilterIterDataPipe'
574
+ def sharding_filter(self, sharding_group_filter=None) -> IterDataPipe:
575
+ r"""
576
+ Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).
577
+
578
+ After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the
579
+ original DataPipe, where `n` equals the number of instances.
580
+
581
+ Args:
582
+ source_datapipe: Iterable DataPipe that will be sharded
583
+ """
584
+
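A hypothetical sketch of how ``sharding_filter`` behaves once sharding is applied; the split below
simply follows the every-``n``-th-element rule described above:

    >>> # xdoctest: +SKIP
    >>> from torchdata.datapipes.iter import IterableWrapper
    >>> dp = IterableWrapper(range(10)).sharding_filter()
    >>> # Once sharding is applied for two instances (e.g. two DataLoader workers),
    >>> # instance 0 receives [0, 2, 4, 6, 8] and instance 1 receives [1, 3, 5, 7, 9].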
585
+ # Functional form of 'ShufflerIterDataPipe'
586
+ def shuffle(self, *, buffer_size: int = 10000, unbatch_level: int = 0) -> IterDataPipe:
587
+ r"""
588
+ Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).
589
+
590
+ The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
591
+ each item will be yielded from the buffer by reservoir sampling via iterator.
592
+
593
+ ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
594
+ datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
595
+ ``buffer_size`` is required to be greater than or equal to the size of datapipe.
596
+
597
+ When it is used with :class:`torch.utils.data.DataLoader`, the methods to
598
+ set up random seed are different based on :attr:`num_workers`.
599
+
600
+ For single-process mode (:attr:`num_workers == 0`), the random seed is set before
601
+ the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
602
+ mode (:attr:`num_workers > 0`), `worker_init_fn` is used to set up a random seed
603
+ for each worker process.
604
+
605
+ Args:
606
+ datapipe: The IterDataPipe being shuffled
607
+ buffer_size: The buffer size for shuffling (default to ``10000``)
608
+ unbatch_level: Specifies if it is necessary to unbatch source data before
609
+ applying the shuffle
610
+
611
+ Example:
612
+ >>> # xdoctest: +SKIP
613
+ >>> from torchdata.datapipes.iter import IterableWrapper
614
+ >>> dp = IterableWrapper(range(10))
615
+ >>> shuffle_dp = dp.shuffle()
616
+ >>> list(shuffle_dp)
617
+ [0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
618
+ """
619
+
620
+ # Functional form of 'UnBatcherIterDataPipe'
621
+ def unbatch(self, unbatch_level: int = 1) -> IterDataPipe:
622
+ r"""
623
+ Undoes batching of data (functional name: ``unbatch``).
624
+
625
+ In other words, it flattens the data up to the specified level within a batched DataPipe.
626
+
627
+ Args:
628
+ datapipe: Iterable DataPipe being un-batched
629
+ unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
630
+ it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.
631
+
632
+ Example:
633
+ >>> # xdoctest: +SKIP
634
+ >>> from torchdata.datapipes.iter import IterableWrapper
635
+ >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
636
+ >>> dp1 = source_dp.unbatch()
637
+ >>> list(dp1)
638
+ [[0, 1], [2], [3, 4], [5], [6]]
639
+ >>> dp2 = source_dp.unbatch(unbatch_level=2)
640
+ >>> list(dp2)
641
+ [0, 1, 2, 3, 4, 5, 6]
642
+ """
643
+
644
+ # Functional form of 'ZipperIterDataPipe'
645
+ def zip(self, *datapipes: IterDataPipe) -> IterDataPipe:
646
+ r"""
647
+ Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
648
+
649
+ The output is stopped as soon as the shortest input DataPipe is exhausted.
650
+
651
+ Args:
652
+ *datapipes: Iterable DataPipes being aggregated
653
+
654
+ Example:
655
+ >>> # xdoctest: +REQUIRES(module:torchdata)
656
+ >>> from torchdata.datapipes.iter import IterableWrapper
657
+ >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
658
+ >>> list(dp1.zip(dp2, dp3))
659
+ [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]
660
+ """
661
+
662
+
663
+ class DFIterDataPipe(IterDataPipe):
664
+ def _is_dfpipe(self): ...
665
+ def __iter__(self): ...
666
+
667
+ class _DataPipeSerializationWrapper:
668
+ def __init__(self, datapipe): ...
669
+ def __getstate__(self): ...
670
+ def __setstate__(self, state): ...
671
+ def __len__(self): ...
672
+
673
+ class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
674
+ def __iter__(self): ...
675
+
676
+ class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
677
+ def __getitem__(self, idx): ...
678
+
679
+ class DataChunk(list, Generic[T]):
680
+ def __init__(self, items):
681
+ super().__init__(items)
682
+ self.items = items
683
+ def as_str(self, indent: str = "") -> str:
684
+ res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
685
+ return res
686
+ def __iter__(self) -> Iterator[T]:
687
+ yield from super().__iter__()
688
+ def raw_iterator(self) -> T: # type: ignore[misc]
689
+ yield from self.items
llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py ADDED
@@ -0,0 +1,246 @@
1
+ import os
2
+ import pathlib
3
+ from collections import defaultdict
4
+ from typing import Any, Dict, List, Set, Tuple, Union
5
+
6
+
7
+ def materialize_lines(lines: List[str], indentation: int) -> str:
8
+ output = ""
9
+ new_line_with_indent = "\n" + " " * indentation
10
+ for i, line in enumerate(lines):
11
+ if i != 0:
12
+ output += new_line_with_indent
13
+ output += line.replace('\n', new_line_with_indent)
14
+ return output
15
+
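A short illustration of what ``materialize_lines`` produces (the inputs are made up): every line after
the first is prefixed with a newline plus the given indentation, so the result can be spliced into an
indented location of the template.

    # materialize_lines(["def foo(): ...", "def bar(): ..."], 4)
    # -> "def foo(): ...\n    def bar(): ..."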
16
+
17
+ def gen_from_template(dir: str, template_name: str, output_name: str, replacements: List[Tuple[str, Any, int]]):
18
+
19
+ template_path = os.path.join(dir, template_name)
20
+ output_path = os.path.join(dir, output_name)
21
+
22
+ with open(template_path) as f:
23
+ content = f.read()
24
+ for placeholder, lines, indentation in replacements:
25
+ with open(output_path, "w") as f:
26
+ content = content.replace(placeholder, materialize_lines(lines, indentation))
27
+ f.write(content)
28
+
29
+
30
+ def find_file_paths(dir_paths: List[str], files_to_exclude: Set[str]) -> Set[str]:
31
+ """
32
+ When given a path to a directory, returns the paths to the relevant files within it.
33
+
34
+ This function does NOT recursive traverse to subdirectories.
35
+ """
36
+ paths: Set[str] = set()
37
+ for dir_path in dir_paths:
38
+ all_files = os.listdir(dir_path)
39
+ python_files = {fname for fname in all_files if ".py" == fname[-3:]}
40
+ filter_files = {fname for fname in python_files if fname not in files_to_exclude}
41
+ paths.update({os.path.join(dir_path, fname) for fname in filter_files})
42
+ return paths
43
+
44
+
45
+ def extract_method_name(line: str) -> str:
46
+ """Extract method name from decorator in the form of "@functional_datapipe({method_name})"."""
47
+ if "(\"" in line:
48
+ start_token, end_token = "(\"", "\")"
49
+ elif "(\'" in line:
50
+ start_token, end_token = "(\'", "\')"
51
+ else:
52
+ raise RuntimeError(f"Unable to find appropriate method name within line:\n{line}")
53
+ start, end = line.find(start_token) + len(start_token), line.find(end_token)
54
+ return line[start:end]
55
+
56
+
57
+ def extract_class_name(line: str) -> str:
58
+ """Extract class name from class definition in the form of "class {CLASS_NAME}({Type}):"."""
59
+ start_token = "class "
60
+ end_token = "("
61
+ start, end = line.find(start_token) + len(start_token), line.find(end_token)
62
+ return line[start:end]
63
+
64
+
65
+ def parse_datapipe_file(file_path: str) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]:
66
+ """Given a path to file, parses the file and returns a dictionary of method names to function signatures."""
67
+ method_to_signature, method_to_class_name, special_output_type = {}, {}, set()
68
+ doc_string_dict = defaultdict(list)
69
+ with open(file_path) as f:
70
+ open_paren_count = 0
71
+ method_name, class_name, signature = "", "", ""
72
+ skip = False
73
+ for line in f:
74
+ if line.count("\"\"\"") % 2 == 1:
75
+ skip = not skip
76
+ if skip or "\"\"\"" in line: # Saving docstrings
77
+ doc_string_dict[method_name].append(line)
78
+ continue
79
+ if "@functional_datapipe" in line:
80
+ method_name = extract_method_name(line)
81
+ doc_string_dict[method_name] = []
82
+ continue
83
+ if method_name and "class " in line:
84
+ class_name = extract_class_name(line)
85
+ continue
86
+ if method_name and ("def __init__(" in line or "def __new__(" in line):
87
+ if "def __new__(" in line:
88
+ special_output_type.add(method_name)
89
+ open_paren_count += 1
90
+ start = line.find("(") + len("(")
91
+ line = line[start:]
92
+ if open_paren_count > 0:
93
+ open_paren_count += line.count('(')
94
+ open_paren_count -= line.count(')')
95
+ if open_paren_count == 0:
96
+ end = line.rfind(')')
97
+ signature += line[:end]
98
+ method_to_signature[method_name] = process_signature(signature)
99
+ method_to_class_name[method_name] = class_name
100
+ method_name, class_name, signature = "", "", ""
101
+ elif open_paren_count < 0:
102
+ raise RuntimeError("open parenthesis count < 0. This shouldn't be possible.")
103
+ else:
104
+ signature += line.strip('\n').strip(' ')
105
+ return method_to_signature, method_to_class_name, special_output_type, doc_string_dict
106
+
107
+
108
+ def parse_datapipe_files(file_paths: Set[str]) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]:
109
+ methods_and_signatures, methods_and_class_names, methods_with_special_output_types = {}, {}, set()
110
+ methods_and_doc_strings = {}
111
+ for path in file_paths:
112
+ (
113
+ method_to_signature,
114
+ method_to_class_name,
115
+ methods_needing_special_output_types,
116
+ doc_string_dict,
117
+ ) = parse_datapipe_file(path)
118
+ methods_and_signatures.update(method_to_signature)
119
+ methods_and_class_names.update(method_to_class_name)
120
+ methods_with_special_output_types.update(methods_needing_special_output_types)
121
+ methods_and_doc_strings.update(doc_string_dict)
122
+ return methods_and_signatures, methods_and_class_names, methods_with_special_output_types, methods_and_doc_strings
123
+
124
+
125
+ def split_outside_bracket(line: str, delimiter: str = ",") -> List[str]:
126
+ """Given a line of text, split it on the delimiter (a comma by default) unless the delimiter is within brackets '[]'."""
127
+ bracket_count = 0
128
+ curr_token = ""
129
+ res = []
130
+ for char in line:
131
+ if char == "[":
132
+ bracket_count += 1
133
+ elif char == "]":
134
+ bracket_count -= 1
135
+ elif char == delimiter and bracket_count == 0:
136
+ res.append(curr_token)
137
+ curr_token = ""
138
+ continue
139
+ curr_token += char
140
+ res.append(curr_token)
141
+ return res
142
+
143
+
144
+ def process_signature(line: str) -> str:
145
+ """
146
+ Clean up a given raw function signature.
147
+
148
+ This includes removing the self-referential datapipe argument, default
149
+ arguments of input functions, newlines, and spaces.
150
+ """
151
+ tokens: List[str] = split_outside_bracket(line)
152
+ for i, token in enumerate(tokens):
153
+ tokens[i] = token.strip(' ')
154
+ if token == "cls":
155
+ tokens[i] = "self"
156
+ elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"):
157
+ # Remove the datapipe after 'self' or 'cls' unless it has '*'
158
+ tokens[i] = ""
159
+ elif "Callable =" in token: # Remove default argument if it is a function
160
+ head, default_arg = token.rsplit("=", 2)
161
+ tokens[i] = head.strip(' ') + "= ..."
162
+ tokens = [t for t in tokens if t != ""]
163
+ line = ', '.join(tokens)
164
+ return line
165
+
166
+
167
+ def get_method_definitions(file_path: Union[str, List[str]],
168
+ files_to_exclude: Set[str],
169
+ deprecated_files: Set[str],
170
+ default_output_type: str,
171
+ method_to_special_output_type: Dict[str, str],
172
+ root: str = "") -> List[str]:
173
+ """
174
+ .pyi generation process for functional DataPipes.
175
+
176
+ # 1. Find the files that we want to process (exclude the ones we don't)
177
+ # 2. Parse method names and signatures
178
+ # 3. Remove the first argument after self (unless it is "*datapipes"), default args, and spaces
179
+ """
180
+ if root == "":
181
+ root = str(pathlib.Path(__file__).parent.resolve())
182
+ file_path = [file_path] if isinstance(file_path, str) else file_path
183
+ file_path = [os.path.join(root, path) for path in file_path]
184
+ file_paths = find_file_paths(file_path,
185
+ files_to_exclude=files_to_exclude.union(deprecated_files))
186
+ methods_and_signatures, methods_and_class_names, methods_w_special_output_types, methods_and_doc_strings = \
187
+ parse_datapipe_files(file_paths)
188
+
189
+ for fn_name in method_to_special_output_type:
190
+ if fn_name not in methods_w_special_output_types:
191
+ methods_w_special_output_types.add(fn_name)
192
+
193
+ method_definitions = []
194
+ for method_name, arguments in methods_and_signatures.items():
195
+ class_name = methods_and_class_names[method_name]
196
+ if method_name in methods_w_special_output_types:
197
+ output_type = method_to_special_output_type[method_name]
198
+ else:
199
+ output_type = default_output_type
200
+ doc_string = "".join(methods_and_doc_strings[method_name])
201
+ if doc_string == "":
202
+ doc_string = " ...\n"
203
+ method_definitions.append(f"# Functional form of '{class_name}'\n"
204
+ f"def {method_name}({arguments}) -> {output_type}:\n"
205
+ f"{doc_string}")
206
+ method_definitions.sort(key=lambda s: s.split('\n')[1]) # sorting based on method_name
207
+
208
+ return method_definitions
209
+
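For reference, each entry in ``method_definitions`` is a string shaped like the stub bodies shown
earlier in ``datapipe.pyi`` (the exact signature below is illustrative):

    # Functional form of 'MapperIterDataPipe'
    def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe:
        r"""..."""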
210
+
211
+ # Defined outside of main() so they can be imported by TorchData
212
+ iterDP_file_path: str = "iter"
213
+ iterDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
214
+ iterDP_deprecated_files: Set[str] = set()
215
+ iterDP_method_to_special_output_type: Dict[str, str] = {"demux": "List[IterDataPipe]", "fork": "List[IterDataPipe]"}
216
+
217
+ mapDP_file_path: str = "map"
218
+ mapDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
219
+ mapDP_deprecated_files: Set[str] = set()
220
+ mapDP_method_to_special_output_type: Dict[str, str] = {"shuffle": "IterDataPipe"}
221
+
222
+
223
+ def main() -> None:
224
+ """
225
+ Inject the generated method definitions into the template ``datapipe.pyi.in`` to produce ``datapipe.pyi``.
226
+
227
+ TODO: The current implementation of this script only generates interfaces for built-in methods. To generate
228
+ interface for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`.
229
+ """
230
+ iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files,
231
+ "IterDataPipe", iterDP_method_to_special_output_type)
232
+
233
+ map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files,
234
+ "MapDataPipe", mapDP_method_to_special_output_type)
235
+
236
+ path = pathlib.Path(__file__).parent.resolve()
237
+ replacements = [('${IterDataPipeMethods}', iter_method_definitions, 4),
238
+ ('${MapDataPipeMethods}', map_method_definitions, 4)]
239
+ gen_from_template(dir=str(path),
240
+ template_name="datapipe.pyi.in",
241
+ output_name="datapipe.pyi",
242
+ replacements=replacements)
243
+
244
+
245
+ if __name__ == '__main__':
246
+ main()
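A minimal sketch of regenerating the stub, assuming ``datapipe.pyi.in`` is present next to this script
(as in a source checkout) and the directory is writable:

    # rewrites datapipe.pyi alongside the template
    from torch.utils.data.datapipes.gen_pyi import main
    main()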
llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataset.py ADDED
@@ -0,0 +1,488 @@
1
+ import bisect
2
+ import itertools
3
+ import math
4
+ import warnings
5
+ from typing import (
6
+ cast,
7
+ Dict,
8
+ Generic,
9
+ Iterable,
10
+ List,
11
+ Optional,
12
+ Sequence,
13
+ Tuple,
14
+ TypeVar,
15
+ Union,
16
+ )
17
+
18
+ # No 'default_generator' in torch/__init__.pyi
19
+ from torch import default_generator, randperm
20
+
21
+ from ... import Generator, Tensor
22
+
23
+ __all__ = [
24
+ "Dataset",
25
+ "IterableDataset",
26
+ "TensorDataset",
27
+ "StackDataset",
28
+ "ConcatDataset",
29
+ "ChainDataset",
30
+ "Subset",
31
+ "random_split",
32
+ ]
33
+
34
+ T_co = TypeVar("T_co", covariant=True)
35
+ T = TypeVar("T")
36
+ T_dict = Dict[str, T_co]
37
+ T_tuple = Tuple[T_co, ...]
38
+ T_stack = TypeVar("T_stack", T_tuple, T_dict)
39
+
40
+
41
+ class Dataset(Generic[T_co]):
42
+ r"""An abstract class representing a :class:`Dataset`.
43
+
44
+ All datasets that represent a map from keys to data samples should subclass
45
+ it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
46
+ data sample for a given key. Subclasses could also optionally overwrite
47
+ :meth:`__len__`, which is expected to return the size of the dataset by many
48
+ :class:`~torch.utils.data.Sampler` implementations and the default options
49
+ of :class:`~torch.utils.data.DataLoader`. Subclasses could also
50
+ optionally implement :meth:`__getitems__` to speed up batched sample
52
+ loading. This method accepts a list of sample indices for a batch and returns
53
+ the list of samples.
53
+
54
+ .. note::
55
+ :class:`~torch.utils.data.DataLoader` by default constructs an index
56
+ sampler that yields integral indices. To make it work with a map-style
57
+ dataset with non-integral indices/keys, a custom sampler must be provided.
58
+ """
59
+
60
+ def __getitem__(self, index) -> T_co:
61
+ raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")
62
+
63
+ # def __getitems__(self, indices: List) -> List[T_co]:
64
+ # Not implemented to prevent false-positives in fetcher check in
65
+ # torch.utils.data._utils.fetch._MapDatasetFetcher
66
+
67
+ def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]":
68
+ return ConcatDataset([self, other])
69
+
70
+ # No `def __len__(self)` default?
71
+ # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
72
+ # in pytorch/torch/utils/data/sampler.py
73
+
74
+
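A minimal map-style dataset following the contract described above (the class name and data are
hypothetical, not part of this file):

    from torch.utils.data import Dataset

    class SquaresDataset(Dataset[int]):
        def __init__(self, n: int) -> None:
            self.n = n

        def __getitem__(self, index: int) -> int:
            # fetch the sample for a given index/key
            return index * index

        def __len__(self) -> int:
            # lets samplers and DataLoader know the dataset size
            return self.n

    # SquaresDataset(5)[2] == 4 and len(SquaresDataset(5)) == 5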
75
+ class IterableDataset(Dataset[T_co], Iterable[T_co]):
76
+ r"""An iterable Dataset.
77
+
78
+ All datasets that represent an iterable of data samples should subclass it.
79
+ This form of dataset is particularly useful when data comes from a stream.
80
+
81
+ All subclasses should overwrite :meth:`__iter__`, which would return an
82
+ iterator of samples in this dataset.
83
+
84
+ When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
85
+ item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
86
+ iterator. When :attr:`num_workers > 0`, each worker process will have a
87
+ different copy of the dataset object, so it is often desired to configure
88
+ each copy independently to avoid having duplicate data returned from the
89
+ workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
90
+ process, returns information about the worker. It can be used in either the
91
+ dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
92
+ :attr:`worker_init_fn` option to modify each copy's behavior.
93
+
94
+ Example 1: splitting workload across all workers in :meth:`__iter__`::
95
+
96
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
97
+ >>> # xdoctest: +SKIP("Fails on MacOS12")
98
+ >>> class MyIterableDataset(torch.utils.data.IterableDataset):
99
+ ... def __init__(self, start, end):
100
+ ... super().__init__()
101
+ ... assert end > start, "this example code only works with end > start"
102
+ ... self.start = start
103
+ ... self.end = end
104
+ ...
105
+ ... def __iter__(self):
106
+ ... worker_info = torch.utils.data.get_worker_info()
107
+ ... if worker_info is None: # single-process data loading, return the full iterator
108
+ ... iter_start = self.start
109
+ ... iter_end = self.end
110
+ ... else: # in a worker process
111
+ ... # split workload
112
+ ... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
113
+ ... worker_id = worker_info.id
114
+ ... iter_start = self.start + worker_id * per_worker
115
+ ... iter_end = min(iter_start + per_worker, self.end)
116
+ ... return iter(range(iter_start, iter_end))
117
+ ...
118
+ >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
119
+ >>> ds = MyIterableDataset(start=3, end=7)
120
+
121
+ >>> # Single-process loading
122
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
123
+ [tensor([3]), tensor([4]), tensor([5]), tensor([6])]
124
+
125
+ >>> # xdoctest: +REQUIRES(POSIX)
126
+ >>> # Multi-process loading with two worker processes
127
+ >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
128
+ >>> # xdoctest: +IGNORE_WANT("non deterministic")
129
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
130
+ [tensor([3]), tensor([5]), tensor([4]), tensor([6])]
131
+
132
+ >>> # With even more workers
133
+ >>> # xdoctest: +IGNORE_WANT("non deterministic")
134
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12)))
135
+ [tensor([3]), tensor([5]), tensor([4]), tensor([6])]
136
+
137
+ Example 2: splitting workload across all workers using :attr:`worker_init_fn`::
138
+
139
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
140
+ >>> class MyIterableDataset(torch.utils.data.IterableDataset):
141
+ ... def __init__(self, start, end):
142
+ ... super().__init__()
143
+ ... assert end > start, "this example code only works with end > start"
144
+ ... self.start = start
145
+ ... self.end = end
146
+ ...
147
+ ... def __iter__(self):
148
+ ... return iter(range(self.start, self.end))
149
+ ...
150
+ >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
151
+ >>> ds = MyIterableDataset(start=3, end=7)
152
+
153
+ >>> # Single-process loading
154
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
155
+ [3, 4, 5, 6]
156
+ >>>
157
+ >>> # Directly doing multi-process loading yields duplicate data
158
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
159
+ [3, 3, 4, 4, 5, 5, 6, 6]
160
+
161
+ >>> # Define a `worker_init_fn` that configures each dataset copy differently
162
+ >>> def worker_init_fn(worker_id):
163
+ ... worker_info = torch.utils.data.get_worker_info()
164
+ ... dataset = worker_info.dataset # the dataset copy in this worker process
165
+ ... overall_start = dataset.start
166
+ ... overall_end = dataset.end
167
+ ... # configure the dataset to only process the split workload
168
+ ... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
169
+ ... worker_id = worker_info.id
170
+ ... dataset.start = overall_start + worker_id * per_worker
171
+ ... dataset.end = min(dataset.start + per_worker, overall_end)
172
+ ...
173
+
174
+ >>> # Multi-process loading with the custom `worker_init_fn`
175
+ >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
176
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
177
+ [3, 5, 4, 6]
178
+
179
+ >>> # With even more workers
180
+ >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn)))
181
+ [3, 4, 5, 6]
182
+ """
183
+
184
+ def __add__(self, other: Dataset[T_co]):
185
+ return ChainDataset([self, other])
186
+
187
+ # No `def __len__(self)` default? Subclasses raise `TypeError` when needed.
188
+ # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
189
+
190
+
191
+ class TensorDataset(Dataset[Tuple[Tensor, ...]]):
192
+ r"""Dataset wrapping tensors.
193
+
194
+ Each sample will be retrieved by indexing tensors along the first dimension.
195
+
196
+ Args:
197
+ *tensors (Tensor): tensors that have the same size in the first dimension.
198
+ """
199
+
200
+ tensors: Tuple[Tensor, ...]
201
+
202
+ def __init__(self, *tensors: Tensor) -> None:
203
+ assert all(
204
+ tensors[0].size(0) == tensor.size(0) for tensor in tensors
205
+ ), "Size mismatch between tensors"
206
+ self.tensors = tensors
207
+
208
+ def __getitem__(self, index):
209
+ return tuple(tensor[index] for tensor in self.tensors)
210
+
211
+ def __len__(self):
212
+ return self.tensors[0].size(0)
213
+
214
+
215
+ class StackDataset(Dataset[T_stack]):
216
+ r"""Dataset as a stacking of multiple datasets.
217
+
218
+ This class is useful to assemble different parts of complex input data, given as datasets.
219
+
220
+ Example:
221
+ >>> # xdoctest: +SKIP
222
+ >>> images = ImageDataset()
223
+ >>> texts = TextDataset()
224
+ >>> tuple_stack = StackDataset(images, texts)
225
+ >>> tuple_stack[0] == (images[0], texts[0])
226
+ >>> dict_stack = StackDataset(image=images, text=texts)
227
+ >>> dict_stack[0] == {'image': images[0], 'text': texts[0]}
228
+
229
+ Args:
230
+ *args (Dataset): Datasets for stacking returned as tuple.
231
+ **kwargs (Dataset): Datasets for stacking returned as dict.
232
+ """
233
+
234
+ datasets: Union[tuple, dict]
235
+
236
+ def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None:
237
+ if args:
238
+ if kwargs:
239
+ raise ValueError(
240
+ "Supported either ``tuple``- (via ``args``) or"
241
+ "``dict``- (via ``kwargs``) like input/output, but both types are given."
242
+ )
243
+ self._length = len(args[0]) # type: ignore[arg-type]
244
+ if any(self._length != len(dataset) for dataset in args): # type: ignore[arg-type]
245
+ raise ValueError("Size mismatch between datasets")
246
+ self.datasets = args
247
+ elif kwargs:
248
+ tmp = list(kwargs.values())
249
+ self._length = len(tmp[0]) # type: ignore[arg-type]
250
+ if any(self._length != len(dataset) for dataset in tmp): # type: ignore[arg-type]
251
+ raise ValueError("Size mismatch between datasets")
252
+ self.datasets = kwargs
253
+ else:
254
+ raise ValueError("At least one dataset should be passed")
255
+
256
+ def __getitem__(self, index):
257
+ if isinstance(self.datasets, dict):
258
+ return {k: dataset[index] for k, dataset in self.datasets.items()}
259
+ return tuple(dataset[index] for dataset in self.datasets)
260
+
261
+ def __getitems__(self, indices: list):
262
+ # add batched sampling support when parent datasets supports it.
263
+ if isinstance(self.datasets, dict):
264
+ dict_batch: List[T_dict] = [{} for _ in indices]
265
+ for k, dataset in self.datasets.items():
266
+ if callable(getattr(dataset, "__getitems__", None)):
267
+ items = dataset.__getitems__(indices) # type: ignore[attr-defined]
268
+ if len(items) != len(indices):
269
+ raise ValueError(
270
+ "Nested dataset's output size mismatch."
271
+ f" Expected {len(indices)}, got {len(items)}"
272
+ )
273
+ for data, d_sample in zip(items, dict_batch):
274
+ d_sample[k] = data
275
+ else:
276
+ for idx, d_sample in zip(indices, dict_batch):
277
+ d_sample[k] = dataset[idx]
278
+ return dict_batch
279
+
280
+ # tuple data
281
+ list_batch: List[list] = [[] for _ in indices]
282
+ for dataset in self.datasets:
283
+ if callable(getattr(dataset, "__getitems__", None)):
284
+ items = dataset.__getitems__(indices) # type: ignore[attr-defined]
285
+ if len(items) != len(indices):
286
+ raise ValueError(
287
+ "Nested dataset's output size mismatch."
288
+ f" Expected {len(indices)}, got {len(items)}"
289
+ )
290
+ for data, t_sample in zip(items, list_batch):
291
+ t_sample.append(data)
292
+ else:
293
+ for idx, t_sample in zip(indices, list_batch):
294
+ t_sample.append(dataset[idx])
295
+ tuple_batch: List[T_tuple] = [tuple(sample) for sample in list_batch]
296
+ return tuple_batch
297
+
298
+ def __len__(self):
299
+ return self._length
300
+
301
+
302
+ class ConcatDataset(Dataset[T_co]):
303
+ r"""Dataset as a concatenation of multiple datasets.
304
+
305
+ This class is useful to assemble different existing datasets.
306
+
307
+ Args:
308
+ datasets (sequence): List of datasets to be concatenated
309
+ """
310
+
311
+ datasets: List[Dataset[T_co]]
312
+ cumulative_sizes: List[int]
313
+
314
+ @staticmethod
315
+ def cumsum(sequence):
316
+ r, s = [], 0
317
+ for e in sequence:
318
+ l = len(e)
319
+ r.append(l + s)
320
+ s += l
321
+ return r
322
+
323
+ def __init__(self, datasets: Iterable[Dataset]) -> None:
324
+ super().__init__()
325
+ self.datasets = list(datasets)
326
+ assert len(self.datasets) > 0, "datasets should not be an empty iterable" # type: ignore[arg-type]
327
+ for d in self.datasets:
328
+ assert not isinstance(
329
+ d, IterableDataset
330
+ ), "ConcatDataset does not support IterableDataset"
331
+ self.cumulative_sizes = self.cumsum(self.datasets)
332
+
333
+ def __len__(self):
334
+ return self.cumulative_sizes[-1]
335
+
336
+ def __getitem__(self, idx):
337
+ if idx < 0:
338
+ if -idx > len(self):
339
+ raise ValueError(
340
+ "absolute value of index should not exceed dataset length"
341
+ )
342
+ idx = len(self) + idx
343
+ dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
344
+ if dataset_idx == 0:
345
+ sample_idx = idx
346
+ else:
347
+ sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
348
+ return self.datasets[dataset_idx][sample_idx]
349
+
350
+ @property
351
+ def cummulative_sizes(self):
352
+ warnings.warn(
353
+ "cummulative_sizes attribute is renamed to " "cumulative_sizes",
354
+ DeprecationWarning,
355
+ stacklevel=2,
356
+ )
357
+ return self.cumulative_sizes
358
+
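A worked example of the cumulative-size lookup in ``__getitem__`` (the datasets below are made up):

    import torch
    from torch.utils.data import ConcatDataset, TensorDataset

    ds = ConcatDataset([TensorDataset(torch.arange(3)), TensorDataset(torch.arange(4))])
    # lengths 3 and 4 give cumulative_sizes == [3, 7]
    # idx = 5: bisect_right([3, 7], 5) == 1 and sample_idx = 5 - 3 = 2,
    # so ds[5] comes from the second dataset
    assert ds.cumulative_sizes == [3, 7]
    assert ds[5][0].item() == 2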
359
+
360
+ class ChainDataset(IterableDataset):
361
+ r"""Dataset for chaining multiple :class:`IterableDataset` s.
362
+
363
+ This class is useful to assemble different existing dataset streams. The
364
+ chaining operation is done on-the-fly, so concatenating large-scale
365
+ datasets with this class will be efficient.
366
+
367
+ Args:
368
+ datasets (iterable of IterableDataset): datasets to be chained together
369
+ """
370
+
371
+ def __init__(self, datasets: Iterable[Dataset]) -> None:
372
+ super().__init__()
373
+ self.datasets = datasets
374
+
375
+ def __iter__(self):
376
+ for d in self.datasets:
377
+ assert isinstance(
378
+ d, IterableDataset
379
+ ), "ChainDataset only supports IterableDataset"
380
+ yield from d
381
+
382
+ def __len__(self):
383
+ total = 0
384
+ for d in self.datasets:
385
+ assert isinstance(
386
+ d, IterableDataset
387
+ ), "ChainDataset only supports IterableDataset"
388
+ total += len(d) # type: ignore[arg-type]
389
+ return total
390
+
391
+
392
+ class Subset(Dataset[T_co]):
393
+ r"""
394
+ Subset of a dataset at specified indices.
395
+
396
+ Args:
397
+ dataset (Dataset): The whole Dataset
398
+ indices (sequence): Indices in the whole set selected for subset
399
+ """
400
+
401
+ dataset: Dataset[T_co]
402
+ indices: Sequence[int]
403
+
404
+ def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
405
+ self.dataset = dataset
406
+ self.indices = indices
407
+
408
+ def __getitem__(self, idx):
409
+ if isinstance(idx, list):
410
+ return self.dataset[[self.indices[i] for i in idx]]
411
+ return self.dataset[self.indices[idx]]
412
+
413
+ def __getitems__(self, indices: List[int]) -> List[T_co]:
414
+ # add batched sampling support when parent dataset supports it.
415
+ # see torch.utils.data._utils.fetch._MapDatasetFetcher
416
+ if callable(getattr(self.dataset, "__getitems__", None)):
417
+ return self.dataset.__getitems__([self.indices[idx] for idx in indices]) # type: ignore[attr-defined]
418
+ else:
419
+ return [self.dataset[self.indices[idx]] for idx in indices]
420
+
421
+ def __len__(self):
422
+ return len(self.indices)
423
+
424
+
425
+ def random_split(
426
+ dataset: Dataset[T],
427
+ lengths: Sequence[Union[int, float]],
428
+ generator: Optional[Generator] = default_generator,
429
+ ) -> List[Subset[T]]:
430
+ r"""
431
+ Randomly split a dataset into non-overlapping new datasets of given lengths.
432
+
433
+ If a list of fractions that sum up to 1 is given,
434
+ the lengths will be computed automatically as
435
+ floor(frac * len(dataset)) for each fraction provided.
436
+
437
+ After computing the lengths, if there are any remainders, 1 count will be
438
+ distributed in round-robin fashion to the lengths
439
+ until there are no remainders left.
440
+
441
+ Optionally fix the generator for reproducible results, e.g.:
442
+
443
+ Example:
444
+ >>> # xdoctest: +SKIP
445
+ >>> generator1 = torch.Generator().manual_seed(42)
446
+ >>> generator2 = torch.Generator().manual_seed(42)
447
+ >>> random_split(range(10), [3, 7], generator=generator1)
448
+ >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)
449
+
450
+ Args:
451
+ dataset (Dataset): Dataset to be split
452
+ lengths (sequence): lengths or fractions of splits to be produced
453
+ generator (Generator): Generator used for the random permutation.
454
+ """
455
+ if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
456
+ subset_lengths: List[int] = []
457
+ for i, frac in enumerate(lengths):
458
+ if frac < 0 or frac > 1:
459
+ raise ValueError(f"Fraction at index {i} is not between 0 and 1")
460
+ n_items_in_split = int(
461
+ math.floor(len(dataset) * frac) # type: ignore[arg-type]
462
+ )
463
+ subset_lengths.append(n_items_in_split)
464
+ remainder = len(dataset) - sum(subset_lengths) # type: ignore[arg-type]
465
+ # add 1 to all the lengths in round-robin fashion until the remainder is 0
466
+ for i in range(remainder):
467
+ idx_to_add_at = i % len(subset_lengths)
468
+ subset_lengths[idx_to_add_at] += 1
469
+ lengths = subset_lengths
470
+ for i, length in enumerate(lengths):
471
+ if length == 0:
472
+ warnings.warn(
473
+ f"Length of split at index {i} is 0. "
474
+ f"This might result in an empty dataset."
475
+ )
476
+
477
+ # Cannot verify that dataset is Sized
478
+ if sum(lengths) != len(dataset): # type: ignore[arg-type]
479
+ raise ValueError(
480
+ "Sum of input lengths does not equal the length of the input dataset!"
481
+ )
482
+
483
+ indices = randperm(sum(lengths), generator=generator).tolist() # type: ignore[arg-type, call-overload]
484
+ lengths = cast(Sequence[int], lengths)
485
+ return [
486
+ Subset(dataset, indices[offset - length : offset])
487
+ for offset, length in zip(itertools.accumulate(lengths), lengths)
488
+ ]
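A short worked example of the fractional-length handling above (the split itself is random, but the
resulting lengths are deterministic):

    import torch
    from torch.utils.data import random_split

    # len(dataset) == 10 with fractions [0.35, 0.65]: floors are [3, 6] (sum 9),
    # so the remainder of 1 is handed out round-robin, giving final lengths [4, 6].
    parts = random_split(range(10), [0.35, 0.65], generator=torch.Generator().manual_seed(0))
    assert [len(p) for p in parts] == [4, 6]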
llmeval-env/lib/python3.10/site-packages/torch/utils/data/distributed.py ADDED
@@ -0,0 +1,137 @@
1
+ import math
2
+ from typing import TypeVar, Optional, Iterator
3
+
4
+ import torch
5
+ from . import Sampler, Dataset
6
+ import torch.distributed as dist
7
+
8
+ __all__ = ["DistributedSampler", ]
9
+
10
+ T_co = TypeVar('T_co', covariant=True)
11
+
12
+
13
+ class DistributedSampler(Sampler[T_co]):
14
+ r"""Sampler that restricts data loading to a subset of the dataset.
15
+
16
+ It is especially useful in conjunction with
17
+ :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
18
+ process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a
19
+ :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
20
+ original dataset that is exclusive to it.
21
+
22
+ .. note::
23
+ The dataset is assumed to be of constant size, and any instance of it is assumed to always
25
+ return the same elements in the same order.
25
+
26
+ Args:
27
+ dataset: Dataset used for sampling.
28
+ num_replicas (int, optional): Number of processes participating in
29
+ distributed training. By default, :attr:`world_size` is retrieved from the
30
+ current distributed group.
31
+ rank (int, optional): Rank of the current process within :attr:`num_replicas`.
32
+ By default, :attr:`rank` is retrieved from the current distributed
33
+ group.
34
+ shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
35
+ indices.
36
+ seed (int, optional): random seed used to shuffle the sampler if
37
+ :attr:`shuffle=True`. This number should be identical across all
38
+ processes in the distributed group. Default: ``0``.
39
+ drop_last (bool, optional): if ``True``, then the sampler will drop the
40
+ tail of the data to make it evenly divisible across the number of
41
+ replicas. If ``False``, the sampler will add extra indices to make
42
+ the data evenly divisible across the replicas. Default: ``False``.
43
+
44
+ .. warning::
45
+ In distributed mode, calling the :meth:`set_epoch` method at
46
+ the beginning of each epoch **before** creating the :class:`DataLoader` iterator
47
+ is necessary to make shuffling work properly across multiple epochs. Otherwise,
48
+ the same ordering will be always used.
49
+
50
+ Example::
51
+
52
+ >>> # xdoctest: +SKIP
53
+ >>> sampler = DistributedSampler(dataset) if is_distributed else None
54
+ >>> loader = DataLoader(dataset, shuffle=(sampler is None),
55
+ ... sampler=sampler)
56
+ >>> for epoch in range(start_epoch, n_epochs):
57
+ ... if is_distributed:
58
+ ... sampler.set_epoch(epoch)
59
+ ... train(loader)
60
+ """
61
+
62
+ def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,
63
+ rank: Optional[int] = None, shuffle: bool = True,
64
+ seed: int = 0, drop_last: bool = False) -> None:
65
+ if num_replicas is None:
66
+ if not dist.is_available():
67
+ raise RuntimeError("Requires distributed package to be available")
68
+ num_replicas = dist.get_world_size()
69
+ if rank is None:
70
+ if not dist.is_available():
71
+ raise RuntimeError("Requires distributed package to be available")
72
+ rank = dist.get_rank()
73
+ if rank >= num_replicas or rank < 0:
74
+ raise ValueError(
75
+ f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]")
76
+ self.dataset = dataset
77
+ self.num_replicas = num_replicas
78
+ self.rank = rank
79
+ self.epoch = 0
80
+ self.drop_last = drop_last
81
+ # If the dataset length is evenly divisible by # of replicas, then there
82
+ # is no need to drop any data, since the dataset will be split equally.
83
+ if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
84
+ # Split to nearest available length that is evenly divisible.
85
+ # This is to ensure each rank receives the same amount of data when
86
+ # using this Sampler.
87
+ self.num_samples = math.ceil(
88
+ (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
89
+ )
90
+ else:
91
+ self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type]
92
+ self.total_size = self.num_samples * self.num_replicas
93
+ self.shuffle = shuffle
94
+ self.seed = seed
95
+
96
+ def __iter__(self) -> Iterator[T_co]:
97
+ if self.shuffle:
98
+ # deterministically shuffle based on epoch and seed
99
+ g = torch.Generator()
100
+ g.manual_seed(self.seed + self.epoch)
101
+ indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
102
+ else:
103
+ indices = list(range(len(self.dataset))) # type: ignore[arg-type]
104
+
105
+ if not self.drop_last:
106
+ # add extra samples to make it evenly divisible
107
+ padding_size = self.total_size - len(indices)
108
+ if padding_size <= len(indices):
109
+ indices += indices[:padding_size]
110
+ else:
111
+ indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
112
+ else:
113
+ # remove tail of data to make it evenly divisible.
114
+ indices = indices[:self.total_size]
115
+ assert len(indices) == self.total_size
116
+
117
+ # subsample
118
+ indices = indices[self.rank:self.total_size:self.num_replicas]
119
+ assert len(indices) == self.num_samples
120
+
121
+ return iter(indices)
122
+
123
+ def __len__(self) -> int:
124
+ return self.num_samples
125
+
126
+ def set_epoch(self, epoch: int) -> None:
127
+ r"""
128
+ Set the epoch for this sampler.
129
+
130
+ When :attr:`shuffle=True`, this ensures all replicas
131
+ use a different random ordering for each epoch. Otherwise, the next iteration of this
132
+ sampler will yield the same ordering.
133
+
134
+ Args:
135
+ epoch (int): Epoch number.
136
+ """
137
+ self.epoch = epoch
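`DistributedSampler` partitions indices across ranks, and the docstring's warning about `set_epoch` is worth spelling out as a runnable sketch (assumes a process group has already been initialized, e.g. via `torchrun`; dataset and batch size are illustrative):

    import torch
    from torch.utils.data import DataLoader, TensorDataset, DistributedSampler

    dataset = TensorDataset(torch.arange(100).float())
    sampler = DistributedSampler(dataset, shuffle=True, drop_last=False)
    loader = DataLoader(dataset, batch_size=8, sampler=sampler)

    for epoch in range(3):
        sampler.set_epoch(epoch)  # re-seeds the shuffle so each epoch yields a different order
        for batch in loader:
            pass  # training step goes here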
llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph.py ADDED
@@ -0,0 +1,149 @@
+import io
+import pickle
+import warnings
+
+from collections.abc import Collection
+from typing import Dict, List, Optional, Set, Tuple, Type, Union
+
+from torch.utils.data import IterDataPipe, MapDataPipe
+from torch.utils._import_utils import dill_available
+
+
+__all__ = ["traverse", "traverse_dps"]
+
+DataPipe = Union[IterDataPipe, MapDataPipe]
+DataPipeGraph = Dict[int, Tuple[DataPipe, "DataPipeGraph"]]  # type: ignore[misc]
+
+
+def _stub_unpickler():
+    return "STUB"
+
+
+# TODO(VitalyFedyunin): Make sure it works without dill module installed
+def _list_connected_datapipes(scan_obj: DataPipe, only_datapipe: bool, cache: Set[int]) -> List[DataPipe]:
+    f = io.BytesIO()
+    p = pickle.Pickler(f)  # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
+    if dill_available():
+        from dill import Pickler as dill_Pickler
+        d = dill_Pickler(f)
+    else:
+        d = None
+
+    captured_connections = []
+
+    def getstate_hook(ori_state):
+        state = None
+        if isinstance(ori_state, dict):
+            state = {}  # type: ignore[assignment]
+            for k, v in ori_state.items():
+                if isinstance(v, (IterDataPipe, MapDataPipe, Collection)):
+                    state[k] = v  # type: ignore[attr-defined]
+        elif isinstance(ori_state, (tuple, list)):
+            state = []  # type: ignore[assignment]
+            for v in ori_state:
+                if isinstance(v, (IterDataPipe, MapDataPipe, Collection)):
+                    state.append(v)  # type: ignore[attr-defined]
+        elif isinstance(ori_state, (IterDataPipe, MapDataPipe, Collection)):
+            state = ori_state  # type: ignore[assignment]
+        return state
+
+    def reduce_hook(obj):
+        if obj == scan_obj or id(obj) in cache:
+            raise NotImplementedError
+        else:
+            captured_connections.append(obj)
+            # Adding id to remove duplicate DataPipe serialized at the same level
+            cache.add(id(obj))
+            return _stub_unpickler, ()
+
+    datapipe_classes: Tuple[Type[DataPipe]] = (IterDataPipe, MapDataPipe)  # type: ignore[assignment]
+
+    try:
+        for cls in datapipe_classes:
+            cls.set_reduce_ex_hook(reduce_hook)
+            if only_datapipe:
+                cls.set_getstate_hook(getstate_hook)
+        try:
+            p.dump(scan_obj)
+        except (pickle.PickleError, AttributeError, TypeError):
+            if dill_available():
+                d.dump(scan_obj)
+            else:
+                raise
+    finally:
+        for cls in datapipe_classes:
+            cls.set_reduce_ex_hook(None)
+            if only_datapipe:
+                cls.set_getstate_hook(None)
+        if dill_available():
+            from dill import extend as dill_extend
+            dill_extend(False)  # Undo change to dispatch table
+    return captured_connections
+
+
+def traverse_dps(datapipe: DataPipe) -> DataPipeGraph:
+    r"""
+    Traverse the DataPipes and their attributes to extract the DataPipe graph.
+
+    This only looks into the attribute from each DataPipe that is either a
+    DataPipe and a Python collection object such as ``list``, ``tuple``,
+    ``set`` and ``dict``.
+
+    Args:
+        datapipe: the end DataPipe of the graph
+    Returns:
+        A graph represented as a nested dictionary, where keys are ids of DataPipe instances
+        and values are tuples of DataPipe instance and the sub-graph
+    """
+    cache: Set[int] = set()
+    return _traverse_helper(datapipe, only_datapipe=True, cache=cache)
+
+
+def traverse(datapipe: DataPipe, only_datapipe: Optional[bool] = None) -> DataPipeGraph:
+    r"""
+    Traverse the DataPipes and their attributes to extract the DataPipe graph.
+
+    [Deprecated]
+    When ``only_dataPipe`` is specified as ``True``, it would only look into the
+    attribute from each DataPipe that is either a DataPipe and a Python collection object
+    such as ``list``, ``tuple``, ``set`` and ``dict``.
+
+    Note:
+        This function is deprecated. Please use `traverse_dps` instead.
+
+    Args:
+        datapipe: the end DataPipe of the graph
+        only_datapipe: If ``False`` (default), all attributes of each DataPipe are traversed.
+          This argument is deprecating and will be removed after the next release.
+    Returns:
+        A graph represented as a nested dictionary, where keys are ids of DataPipe instances
+        and values are tuples of DataPipe instance and the sub-graph
+    """
+    msg = "`traverse` function and will be removed after 1.13. " \
+          "Please use `traverse_dps` instead."
+    if not only_datapipe:
+        msg += " And, the behavior will be changed to the equivalent of `only_datapipe=True`."
+    warnings.warn(msg, FutureWarning)
+    if only_datapipe is None:
+        only_datapipe = False
+    cache: Set[int] = set()
+    return _traverse_helper(datapipe, only_datapipe, cache)
+
+
+# Add cache here to prevent infinite recursion on DataPipe
+def _traverse_helper(datapipe: DataPipe, only_datapipe: bool, cache: Set[int]) -> DataPipeGraph:
+    if not isinstance(datapipe, (IterDataPipe, MapDataPipe)):
+        raise RuntimeError(f"Expected `IterDataPipe` or `MapDataPipe`, but {type(datapipe)} is found")
+
+    dp_id = id(datapipe)
+    if dp_id in cache:
+        return {}
+    cache.add(dp_id)
+    # Using cache.copy() here is to prevent the same DataPipe pollutes the cache on different paths
+    items = _list_connected_datapipes(datapipe, only_datapipe, cache.copy())
+    d: DataPipeGraph = {dp_id: (datapipe, {})}
+    for item in items:
+        # Using cache.copy() here is to prevent recursion on a single path rather than global graph
+        # Single DataPipe can present multiple times in different paths in graph
+        d[dp_id][1].update(_traverse_helper(item, only_datapipe, cache.copy()))
+    return d
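`traverse_dps` returns a nested `{id: (datapipe, sub_graph)}` dictionary. A small sketch of walking that structure (the pipeline built here is illustrative):

    from torch.utils.data.datapipes.iter import IterableWrapper
    from torch.utils.data.graph import traverse_dps

    dp = IterableWrapper(range(10)).shuffle().batch(2)
    graph = traverse_dps(dp)

    def print_names(g, depth=0):
        # Each key is id(datapipe); each value is (datapipe, sub_graph).
        for _, (pipe, sub) in g.items():
            print("  " * depth + type(pipe).__name__)
            print_names(sub, depth + 1)

    print_names(graph)  # prints the pipeline from the sink back to the source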
llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph_settings.py ADDED
@@ -0,0 +1,160 @@
+import inspect
+import warnings
+
+from typing import Any, List, Optional, Set
+
+import torch
+
+from torch.utils.data.datapipes.iter.sharding import (
+    _ShardingIterDataPipe,
+    SHARDING_PRIORITIES,
+)
+from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps
+
+__all__ = [
+    "apply_random_seed",
+    "apply_sharding",
+    "apply_shuffle_seed",
+    "apply_shuffle_settings",
+    "get_all_graph_pipes",
+]
+
+
+def get_all_graph_pipes(graph: DataPipeGraph) -> List[DataPipe]:
+    return _get_all_graph_pipes_helper(graph, set())
+
+
+def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> List[DataPipe]:
+    results: List[DataPipe] = []
+    for dp_id, (datapipe, sub_graph) in graph.items():
+        if dp_id in id_cache:
+            continue
+        id_cache.add(dp_id)
+        results.append(datapipe)
+        results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache))
+    return results
+
+
+def _is_sharding_datapipe(datapipe: DataPipe) -> bool:
+    if isinstance(datapipe, _ShardingIterDataPipe):
+        return True
+    if hasattr(datapipe, "apply_sharding") and inspect.ismethod(datapipe.apply_sharding):
+        return True
+    return False
+
+
+def apply_sharding(datapipe: DataPipe,
+                   num_of_instances: int,
+                   instance_id: int,
+                   sharding_group=SHARDING_PRIORITIES.DEFAULT) -> DataPipe:
+    r"""
+    Apply dynamic sharding over the ``sharding_filter`` DataPipe that has a method ``apply_sharding``.
+
+    RuntimeError will be raised when multiple ``sharding_filter`` are presented in the same branch.
+    """
+    graph = traverse_dps(datapipe)
+
+    def _helper(graph, prev_applied=None):
+        for (dp, sub_graph) in graph.values():
+            applied = None
+            if _is_sharding_datapipe(dp):
+                if prev_applied is not None:
+                    raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. "
+                                       f"Sharding already applied to {prev_applied} while trying to apply to {dp}")
+                # For BC, only provide sharding_group if accepted
+                sig = inspect.signature(dp.apply_sharding)
+                if len(sig.parameters) < 3:
+                    dp.apply_sharding(num_of_instances, instance_id)
+                else:
+                    dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)
+                applied = dp
+            if applied is None:
+                applied = prev_applied
+            _helper(sub_graph, applied)
+
+    _helper(graph)
+
+    return datapipe
+
+
+def _is_shuffle_datapipe(datapipe: DataPipe) -> bool:
+    if not hasattr(datapipe, "set_shuffle") or not hasattr(datapipe, "set_seed"):
+        return False
+    if not inspect.ismethod(datapipe.set_shuffle) or not inspect.ismethod(datapipe.set_seed):
+        return False
+    return True
+
+
+def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe:
+    r"""
+    Traverse the graph of ``DataPipes`` to find and set shuffle attribute.
+
+    Apply the method to each `DataPipe` that has APIs of ``set_shuffle``
+    and ``set_seed``.
+
+    Args:
+        datapipe: DataPipe that needs to set shuffle attribute
+        shuffle: Shuffle option (default: ``None`` and no-op to the graph)
+    """
+    if shuffle is None:
+        return datapipe
+
+    graph = traverse_dps(datapipe)
+    all_pipes = get_all_graph_pipes(graph)
+    shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)]
+    if not shufflers and shuffle:
+        warnings.warn(
+            "`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. "
+            "Be aware that the default buffer size might not be sufficient for your task."
+        )
+        datapipe = datapipe.shuffle()
+        shufflers = [datapipe, ]  # type: ignore[list-item]
+
+    for shuffler in shufflers:
+        shuffler.set_shuffle(shuffle)
+
+    return datapipe
+
+
+def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe:
+    warnings.warn(
+        "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases."
+        "\nPlease use `apply_random_seed` instead."
+    )
+    return apply_random_seed(datapipe, rng)
+
+
+def _is_random_datapipe(datapipe: DataPipe) -> bool:
+    if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed):
+        return True
+    return False
+
+
+def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe:
+    r"""
+    Traverse the graph of ``DataPipes`` to find random ``DataPipe`` with an API of ``set_seed``.
+
+    Then set the random seed based on the provided RNG to those ``DataPipe``.
+
+    Args:
+        datapipe: DataPipe that needs to set randomness
+        rng: Random number generator to generate random seeds
+    """
+    graph = traverse_dps(datapipe)
+    all_pipes = get_all_graph_pipes(graph)
+    # Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once.
+    # And, `id` is used in case of unhashable DataPipe
+    cache = set()
+    random_datapipes = []
+    for pipe in all_pipes:
+        if id(pipe) in cache:
+            continue
+        if _is_random_datapipe(pipe):
+            random_datapipes.append(pipe)
+            cache.add(id(pipe))
+
+    for pipe in random_datapipes:
+        random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item())
+        pipe.set_seed(random_seed)
+
+    return datapipe
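These helpers are normally invoked by the `DataLoader` when it is handed a datapipe, but they can also be called directly. A sketch with an illustrative pipeline (worker count, instance id, and seed are made up for the example):

    import torch
    from torch.utils.data.datapipes.iter import IterableWrapper
    from torch.utils.data.graph_settings import apply_random_seed, apply_sharding

    dp = IterableWrapper(range(16)).shuffle().sharding_filter()

    apply_sharding(dp, num_of_instances=4, instance_id=1)      # keep only this instance's shard
    apply_random_seed(dp, torch.Generator().manual_seed(0))    # seed every shuffler in the graph

    print(list(dp))  # 4 of the 16 elements, in a seeded shuffled order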
llmeval-env/lib/python3.10/site-packages/torch/utils/data/sampler.py ADDED
@@ -0,0 +1,305 @@
1
+ import torch
2
+ from torch import Tensor
3
+
4
+ from typing import Iterator, Iterable, Optional, Sequence, List, TypeVar, Generic, Sized, Union
5
+
6
+ __all__ = [
7
+ "BatchSampler",
8
+ "RandomSampler",
9
+ "Sampler",
10
+ "SequentialSampler",
11
+ "SubsetRandomSampler",
12
+ "WeightedRandomSampler",
13
+ ]
14
+
15
+ T_co = TypeVar('T_co', covariant=True)
16
+
17
+
18
+ class Sampler(Generic[T_co]):
19
+ r"""Base class for all Samplers.
20
+
21
+ Every Sampler subclass has to provide an :meth:`__iter__` method, providing a
22
+ way to iterate over indices or lists of indices (batches) of dataset elements, and a :meth:`__len__` method
23
+ that returns the length of the returned iterators.
24
+
25
+ Args:
26
+ data_source (Dataset): This argument is not used and will be removed in 2.2.0.
27
+ You may still have custom implementation that utilizes it.
28
+
29
+ Example:
30
+ >>> # xdoctest: +SKIP
31
+ >>> class AccedingSequenceLengthSampler(Sampler[int]):
32
+ >>> def __init__(self, data: List[str]) -> None:
33
+ >>> self.data = data
34
+ >>>
35
+ >>> def __len__(self) -> int:
36
+ >>> return len(self.data)
37
+ >>>
38
+ >>> def __iter__(self) -> Iterator[int]:
39
+ >>> sizes = torch.tensor([len(x) for x in self.data])
40
+ >>> yield from torch.argsort(sizes).tolist()
41
+ >>>
42
+ >>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]):
43
+ >>> def __init__(self, data: List[str], batch_size: int) -> None:
44
+ >>> self.data = data
45
+ >>> self.batch_size = batch_size
46
+ >>>
47
+ >>> def __len__(self) -> int:
48
+ >>> return (len(self.data) + self.batch_size - 1) // self.batch_size
49
+ >>>
50
+ >>> def __iter__(self) -> Iterator[List[int]]:
51
+ >>> sizes = torch.tensor([len(x) for x in self.data])
52
+ >>> for batch in torch.chunk(torch.argsort(sizes), len(self)):
53
+ >>> yield batch.tolist()
54
+
55
+ .. note:: The :meth:`__len__` method isn't strictly required by
56
+ :class:`~torch.utils.data.DataLoader`, but is expected in any
57
+ calculation involving the length of a :class:`~torch.utils.data.DataLoader`.
58
+ """
59
+
60
+ def __init__(self, data_source: Optional[Sized] = None) -> None:
61
+ if data_source is not None:
62
+ import warnings
63
+
64
+ warnings.warn("`data_source` argument is not used and will be removed in 2.2.0."
65
+ "You may still have custom implementation that utilizes it.")
66
+
67
+ def __iter__(self) -> Iterator[T_co]:
68
+ raise NotImplementedError
69
+
70
+ # NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
71
+ #
72
+ # Many times we have an abstract class representing a collection/iterable of
73
+ # data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
74
+ # implementing a `__len__` method. In such cases, we must make sure to not
75
+ # provide a default implementation, because both straightforward default
76
+ # implementations have their issues:
77
+ #
78
+ # + `return NotImplemented`:
79
+ # Calling `len(subclass_instance)` raises:
80
+ # TypeError: 'NotImplementedType' object cannot be interpreted as an integer
81
+ #
82
+ # + `raise NotImplementedError()`:
83
+ # This prevents triggering some fallback behavior. E.g., the built-in
84
+ # `list(X)` tries to call `len(X)` first, and executes a different code
85
+ # path if the method is not found or `NotImplemented` is returned, while
86
+ # raising a `NotImplementedError` will propagate and make the call fail
87
+ # where it could have used `__iter__` to complete the call.
88
+ #
89
+ # Thus, the only two sensible things to do are
90
+ #
91
+ # + **not** provide a default `__len__`.
92
+ #
93
+ # + raise a `TypeError` instead, which is what Python uses when users call
94
+ # a method that is not defined on an object.
95
+ # (@ssnl verifies that this works on at least Python 3.7.)
96
+
97
+
98
+ class SequentialSampler(Sampler[int]):
99
+ r"""Samples elements sequentially, always in the same order.
100
+
101
+ Args:
102
+ data_source (Dataset): dataset to sample from
103
+ """
104
+
105
+ data_source: Sized
106
+
107
+ def __init__(self, data_source: Sized) -> None:
108
+ self.data_source = data_source
109
+
110
+ def __iter__(self) -> Iterator[int]:
111
+ return iter(range(len(self.data_source)))
112
+
113
+ def __len__(self) -> int:
114
+ return len(self.data_source)
115
+
116
+
117
+ class RandomSampler(Sampler[int]):
118
+ r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
119
+
120
+ If with replacement, then user can specify :attr:`num_samples` to draw.
121
+
122
+ Args:
123
+ data_source (Dataset): dataset to sample from
124
+ replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
125
+ num_samples (int): number of samples to draw, default=`len(dataset)`.
126
+ generator (Generator): Generator used in sampling.
127
+ """
128
+
129
+ data_source: Sized
130
+ replacement: bool
131
+
132
+ def __init__(self, data_source: Sized, replacement: bool = False,
133
+ num_samples: Optional[int] = None, generator=None) -> None:
134
+ self.data_source = data_source
135
+ self.replacement = replacement
136
+ self._num_samples = num_samples
137
+ self.generator = generator
138
+
139
+ if not isinstance(self.replacement, bool):
140
+ raise TypeError(f"replacement should be a boolean value, but got replacement={self.replacement}")
141
+
142
+ if not isinstance(self.num_samples, int) or self.num_samples <= 0:
143
+ raise ValueError(f"num_samples should be a positive integer value, but got num_samples={self.num_samples}")
144
+
145
+ @property
146
+ def num_samples(self) -> int:
147
+ # dataset size might change at runtime
148
+ if self._num_samples is None:
149
+ return len(self.data_source)
150
+ return self._num_samples
151
+
152
+ def __iter__(self) -> Iterator[int]:
153
+ n = len(self.data_source)
154
+ if self.generator is None:
155
+ seed = int(torch.empty((), dtype=torch.int64).random_().item())
156
+ generator = torch.Generator()
157
+ generator.manual_seed(seed)
158
+ else:
159
+ generator = self.generator
160
+
161
+ if self.replacement:
162
+ for _ in range(self.num_samples // 32):
163
+ yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist()
164
+ yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist()
165
+ else:
166
+ for _ in range(self.num_samples // n):
167
+ yield from torch.randperm(n, generator=generator).tolist()
168
+ yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n]
169
+
170
+ def __len__(self) -> int:
171
+ return self.num_samples
172
+
173
+
174
+ class SubsetRandomSampler(Sampler[int]):
175
+ r"""Samples elements randomly from a given list of indices, without replacement.
176
+
177
+ Args:
178
+ indices (sequence): a sequence of indices
179
+ generator (Generator): Generator used in sampling.
180
+ """
181
+
182
+ indices: Sequence[int]
183
+
184
+ def __init__(self, indices: Sequence[int], generator=None) -> None:
185
+ self.indices = indices
186
+ self.generator = generator
187
+
188
+ def __iter__(self) -> Iterator[int]:
189
+ for i in torch.randperm(len(self.indices), generator=self.generator):
190
+ yield self.indices[i]
191
+
192
+ def __len__(self) -> int:
193
+ return len(self.indices)
194
+
195
+
196
+ class WeightedRandomSampler(Sampler[int]):
197
+ r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
198
+
199
+ Args:
200
+ weights (sequence) : a sequence of weights, not necessarily summing up to one
201
+ num_samples (int): number of samples to draw
202
+ replacement (bool): if ``True``, samples are drawn with replacement.
203
+ If not, they are drawn without replacement, which means that when a
204
+ sample index is drawn for a row, it cannot be drawn again for that row.
205
+ generator (Generator): Generator used in sampling.
206
+
207
+ Example:
208
+ >>> # xdoctest: +IGNORE_WANT("non-deterministic")
209
+ >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
210
+ [4, 4, 1, 4, 5]
211
+ >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
212
+ [0, 1, 4, 3, 2]
213
+ """
214
+
215
+ weights: Tensor
216
+ num_samples: int
217
+ replacement: bool
218
+
219
+ def __init__(self, weights: Sequence[float], num_samples: int,
220
+ replacement: bool = True, generator=None) -> None:
221
+ if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
222
+ num_samples <= 0:
223
+ raise ValueError(f"num_samples should be a positive integer value, but got num_samples={num_samples}")
224
+ if not isinstance(replacement, bool):
225
+ raise ValueError(f"replacement should be a boolean value, but got replacement={replacement}")
226
+
227
+ weights_tensor = torch.as_tensor(weights, dtype=torch.double)
228
+ if len(weights_tensor.shape) != 1:
229
+ raise ValueError("weights should be a 1d sequence but given "
230
+ f"weights have shape {tuple(weights_tensor.shape)}")
231
+
232
+ self.weights = weights_tensor
233
+ self.num_samples = num_samples
234
+ self.replacement = replacement
235
+ self.generator = generator
236
+
237
+ def __iter__(self) -> Iterator[int]:
238
+ rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator)
239
+ yield from iter(rand_tensor.tolist())
240
+
241
+ def __len__(self) -> int:
242
+ return self.num_samples
243
+
244
+
245
+ class BatchSampler(Sampler[List[int]]):
246
+ r"""Wraps another sampler to yield a mini-batch of indices.
247
+
248
+ Args:
249
+ sampler (Sampler or Iterable): Base sampler. Can be any iterable object
250
+ batch_size (int): Size of mini-batch.
251
+ drop_last (bool): If ``True``, the sampler will drop the last batch if
252
+ its size would be less than ``batch_size``
253
+
254
+ Example:
255
+ >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
256
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
257
+ >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
258
+ [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
259
+ """
260
+
261
+ def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None:
262
+ # Since collections.abc.Iterable does not check for `__getitem__`, which
263
+ # is one way for an object to be an iterable, we don't do an `isinstance`
264
+ # check here.
265
+ if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
266
+ batch_size <= 0:
267
+ raise ValueError(f"batch_size should be a positive integer value, but got batch_size={batch_size}")
268
+ if not isinstance(drop_last, bool):
269
+ raise ValueError(f"drop_last should be a boolean value, but got drop_last={drop_last}")
270
+ self.sampler = sampler
271
+ self.batch_size = batch_size
272
+ self.drop_last = drop_last
273
+
274
+ def __iter__(self) -> Iterator[List[int]]:
275
+ # Implemented based on the benchmarking in https://github.com/pytorch/pytorch/pull/76951
276
+ if self.drop_last:
277
+ sampler_iter = iter(self.sampler)
278
+ while True:
279
+ try:
280
+ batch = [next(sampler_iter) for _ in range(self.batch_size)]
281
+ yield batch
282
+ except StopIteration:
283
+ break
284
+ else:
285
+ batch = [0] * self.batch_size
286
+ idx_in_batch = 0
287
+ for idx in self.sampler:
288
+ batch[idx_in_batch] = idx
289
+ idx_in_batch += 1
290
+ if idx_in_batch == self.batch_size:
291
+ yield batch
292
+ idx_in_batch = 0
293
+ batch = [0] * self.batch_size
294
+ if idx_in_batch > 0:
295
+ yield batch[:idx_in_batch]
296
+
297
+ def __len__(self) -> int:
298
+ # Can only be called if self.sampler has __len__ implemented
299
+ # We cannot enforce this condition, so we turn off typechecking for the
300
+ # implementation below.
301
+ # Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
302
+ if self.drop_last:
303
+ return len(self.sampler) // self.batch_size # type: ignore[arg-type]
304
+ else:
305
+ return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
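A short sketch showing two common uses of these samplers (values are illustrative):

    from torch.utils.data import BatchSampler, SequentialSampler, WeightedRandomSampler

    list(BatchSampler(SequentialSampler(range(10)), batch_size=4, drop_last=False))
    # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]

    list(WeightedRandomSampler([0.1, 0.9, 0.4], num_samples=5, replacement=True))
    # five indices in [0, 3), biased towards index 1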
llmeval-env/lib/python3.10/site-packages/torch/utils/dlpack.py ADDED
@@ -0,0 +1,121 @@
+from typing import Any
+
+import torch
+import enum
+
+from torch._C import _from_dlpack
+from torch._C import _to_dlpack as to_dlpack
+
+
+class DLDeviceType(enum.IntEnum):
+    # Enums as in DLPack specification (aten/src/ATen/dlpack.h)
+    kDLCPU = 1,
+    kDLGPU = 2,
+    kDLCPUPinned = 3,
+    kDLOpenCL = 4,
+    kDLVulkan = 7,
+    kDLMetal = 8,
+    kDLVPI = 9,
+    kDLROCM = 10,
+    kDLExtDev = 12,
+    kDLOneAPI = 14,
+
+
+torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
+
+Returns an opaque object (a "DLPack capsule") representing the tensor.
+
+.. note::
+  ``to_dlpack`` is a legacy DLPack interface. The capsule it returns
+  cannot be used for anything in Python other than use it as input to
+  ``from_dlpack``. The more idiomatic use of DLPack is to call
+  ``from_dlpack`` directly on the tensor object - this works when that
+  object has a ``__dlpack__`` method, which PyTorch and most other
+  libraries indeed have now.
+
+.. warning::
+  Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
+  Behavior when a capsule is consumed multiple times is undefined.
+
+Args:
+    tensor: a tensor to be exported
+
+The DLPack capsule shares the tensor's memory.
+""")
+
+
+# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
+# __dlpack__ and __dlpack_device__ methods are accepted.
+def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
+    """from_dlpack(ext_tensor) -> Tensor
+
+    Converts a tensor from an external library into a ``torch.Tensor``.
+
+    The returned PyTorch tensor will share the memory with the input tensor
+    (which may have come from another library). Note that in-place operations
+    will therefore also affect the data of the input tensor. This may lead to
+    unexpected issues (e.g., other libraries may have read-only flags or
+    immutable data structures), so the user should only do this if they know
+    for sure that this is fine.
+
+    Args:
+        ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
+            The tensor or DLPack capsule to convert.
+
+            If ``ext_tensor`` is a tensor (or ndarray) object, it must support
+            the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
+            method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
+            an opaque ``PyCapsule`` instance, typically produced by a
+            ``to_dlpack`` function or method.
+
+    Examples::
+
+        >>> import torch.utils.dlpack
+        >>> t = torch.arange(4)
+
+        # Convert a tensor directly (supported in PyTorch >= 1.10)
+        >>> t2 = torch.from_dlpack(t)
+        >>> t2[:2] = -1  # show that memory is shared
+        >>> t2
+        tensor([-1, -1,  2,  3])
+        >>> t
+        tensor([-1, -1,  2,  3])
+
+        # The old-style DLPack usage, with an intermediate capsule object
+        >>> capsule = torch.utils.dlpack.to_dlpack(t)
+        >>> capsule
+        <capsule object "dltensor" at ...>
+        >>> t3 = torch.from_dlpack(capsule)
+        >>> t3
+        tensor([-1, -1,  2,  3])
+        >>> t3[0] = -9  # now we're sharing memory between 3 tensors
+        >>> t3
+        tensor([-9, -1,  2,  3])
+        >>> t2
+        tensor([-9, -1,  2,  3])
+        >>> t
+        tensor([-9, -1,  2,  3])
+
+    """
+    if hasattr(ext_tensor, '__dlpack__'):
+        device = ext_tensor.__dlpack_device__()
+        # device is either CUDA or ROCm, we need to pass the current
+        # stream
+        if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):
+            stream = torch.cuda.current_stream(f'cuda:{device[1]}')
+            # cuda_stream is the pointer to the stream and it is a public
+            # attribute, but it is not documented
+            # The array API specify that the default legacy stream must be passed
+            # with a value of 1 for CUDA
+            # https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
+            is_cuda = device[0] == DLDeviceType.kDLGPU
+            # Since pytorch is not using PTDS by default, lets directly pass
+            # the legacy stream
+            stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
+            dlpack = ext_tensor.__dlpack__(stream=stream_ptr)
+        else:
+            dlpack = ext_tensor.__dlpack__()
+    else:
+        # Old versions just call the converter
+        dlpack = ext_tensor
+    return _from_dlpack(dlpack)
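A sketch of the zero-copy exchange described in the docstring, with NumPy as the producer (assumes a NumPy version recent enough to implement `__dlpack__`):

    import numpy as np
    import torch
    from torch.utils.dlpack import from_dlpack, to_dlpack

    arr = np.arange(4)
    t = from_dlpack(arr)     # shares memory with the NumPy array
    t[0] = -1
    print(arr)               # [-1  1  2  3]

    capsule = to_dlpack(torch.arange(4))   # legacy capsule route
    t2 = from_dlpack(capsule)              # consume each capsule exactly once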
llmeval-env/lib/python3.10/site-packages/torch/utils/file_baton.py ADDED
@@ -0,0 +1,49 @@
+import os
+import time
+
+
+class FileBaton:
+    """A primitive, file-based synchronization utility."""
+
+    def __init__(self, lock_file_path, wait_seconds=0.1):
+        """
+        Create a new :class:`FileBaton`.
+
+        Args:
+            lock_file_path: The path to the file used for locking.
+            wait_seconds: The seconds to periodically sleep (spin) when
+                calling ``wait()``.
+        """
+        self.lock_file_path = lock_file_path
+        self.wait_seconds = wait_seconds
+        self.fd = None
+
+    def try_acquire(self):
+        """
+        Try to atomically create a file under exclusive access.
+
+        Returns:
+            True if the file could be created, else False.
+        """
+        try:
+            self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
+            return True
+        except FileExistsError:
+            return False
+
+    def wait(self):
+        """
+        Periodically sleeps for a certain amount until the baton is released.
+
+        The amount of time slept depends on the ``wait_seconds`` parameter
+        passed to the constructor.
+        """
+        while os.path.exists(self.lock_file_path):
+            time.sleep(self.wait_seconds)
+
+    def release(self):
+        """Release the baton and removes its file."""
+        if self.fd is not None:
+            os.close(self.fd)
+
+        os.remove(self.lock_file_path)
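`FileBaton` is the helper `torch.utils.cpp_extension` uses so that only one process performs a JIT build at a time; the same pattern works for any once-only work. A minimal sketch (the lock path is illustrative):

    import os
    import tempfile
    from torch.utils.file_baton import FileBaton

    baton = FileBaton(os.path.join(tempfile.gettempdir(), "example_build.lock"))
    if baton.try_acquire():
        try:
            pass  # do the one-time work here
        finally:
            baton.release()
    else:
        baton.wait()  # spin until the holder releases the baton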
llmeval-env/lib/python3.10/site-packages/torch/utils/flop_counter.py ADDED
@@ -0,0 +1,559 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
4
+ from typing import List, Any, Dict, Optional, Union, NamedTuple
5
+ from collections import defaultdict
6
+ from torch.utils._python_dispatch import TorchDispatchMode
7
+ from torch.utils.hooks import RemovableHandle
8
+ from torch._decomp import register_decomposition
9
+ from math import prod
10
+ from functools import wraps
11
+
12
+
13
+
14
+ __all__ = ["FlopCounterMode", "register_flop_formula"]
15
+
16
+ aten = torch.ops.aten
17
+
18
+ def get_shape(i):
19
+ if isinstance(i, torch.Tensor):
20
+ return i.shape
21
+ return i
22
+
23
+ flop_registry: Dict[Any, Any] = {}
24
+
25
+ def shape_wrapper(f):
26
+ @wraps(f)
27
+ def nf(*args, out=None, **kwargs):
28
+ args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out))
29
+ return f(*args, out_shape=out_shape, **kwargs)
30
+ return nf
31
+
32
+ def register_flop_formula(targets, get_raw=False):
33
+ def register_fun(flop_formula):
34
+ if not get_raw:
35
+ flop_formula = shape_wrapper(flop_formula)
36
+ register_decomposition(targets, registry=flop_registry, unsafe=True)(flop_formula)
37
+ return flop_formula
38
+
39
+ return register_fun
40
+
41
+ @register_flop_formula(aten.mm)
42
+ def mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int:
43
+ """Count flops for matmul."""
44
+ # Inputs should be a list of length 2.
45
+ # Inputs contains the shapes of two matrices.
46
+ m, k = a_shape
47
+ k2, n = b_shape
48
+ assert k == k2
49
+ # NB(chilli): Should be 2 * k - 1 technically for FLOPs.
50
+ return m * n * 2 * k
51
+
52
+ @register_flop_formula(aten.addmm)
53
+ def addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
54
+ """Count flops for addmm."""
55
+ return mm_flop(a_shape, b_shape)
56
+
57
+ @register_flop_formula(aten.bmm)
58
+ def bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int:
59
+ """Count flops for the bmm operation."""
60
+ # Inputs should be a list of length 2.
61
+ # Inputs contains the shapes of two tensor.
62
+ b, m, k = a_shape
63
+ b2, k2, n = b_shape
64
+ assert b == b2
65
+ assert k == k2
66
+ # NB(chilli): Should be 2 * k - 1 technically for FLOPs.
67
+ flop = b * m * n * 2 * k
68
+ return flop
69
+
70
+ @register_flop_formula(aten.baddbmm)
71
+ def baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
72
+ """Count flops for the baddbmm operation."""
73
+ # Inputs should be a list of length 3.
74
+ # Inputs contains the shapes of three tensors.
75
+ return bmm_flop(a_shape, b_shape)
76
+
77
+
78
+ def conv_flop_count(
79
+ x_shape: List[int],
80
+ w_shape: List[int],
81
+ out_shape: List[int],
82
+ transposed: bool = False,
83
+ ) -> int:
84
+ """Count flops for convolution.
85
+
86
+ Note only multiplication is
87
+ counted. Computation for bias are ignored.
88
+ Flops for a transposed convolution are calculated as
89
+ flops = (x_shape[2:] * prod(w_shape) * batch_size).
90
+ Args:
91
+ x_shape (list(int)): The input shape before convolution.
92
+ w_shape (list(int)): The filter shape.
93
+ out_shape (list(int)): The output shape after convolution.
94
+ transposed (bool): is the convolution transposed
95
+ Returns:
96
+ int: the number of flops
97
+ """
98
+
99
+ batch_size = x_shape[0]
100
+ conv_shape = (x_shape if transposed else out_shape)[2:]
101
+ c_out, c_in, *filter_size = w_shape
102
+
103
+ """
104
+ General idea here is that for a regular conv, for each point in the output
105
+ spatial dimension we convolve the filter with something (hence
106
+ `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by
107
+ 1. batch_size, 2. the cross product of input and weight channels.
108
+
109
+ For the transpose, it's not each point in the *output* spatial dimension but
110
+ each point in the *input* spatial dimension.
111
+ """
112
+ # NB(chilli): I don't think this properly accounts for padding :think:
113
+ # NB(chilli): Should be 2 * c_in - 1 technically for FLOPs.
114
+ flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2
115
+ return flop
116
+
117
+ @register_flop_formula([aten.convolution, aten._convolution])
118
+ def conv_flop(x_shape, w_shape, _bias, _stride, _padding, _dilation, transposed, *args, out_shape=None, **kwargs) -> int:
119
+ """Count flops for convolution."""
120
+ return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)
121
+
122
+
123
+ @register_flop_formula(aten.convolution_backward)
124
+ def conv_backward_flop(
125
+ grad_out_shape,
126
+ x_shape,
127
+ w_shape,
128
+ _bias,
129
+ _stride,
130
+ _padding,
131
+ _dilation,
132
+ transposed,
133
+ _output_padding,
134
+ _groups,
135
+ output_mask,
136
+ out_shape) -> int:
137
+
138
+ def t(shape):
139
+ return [shape[1], shape[0]] + list(shape[2:])
140
+ flop_count = 0
141
+
142
+ """
143
+ Let's say we have a regular 1D conv
144
+ {A, B, C} [inp]
145
+ {i, j} [weight]
146
+ => (conv)
147
+ {Ai + Bj, Bi + Cj} [out]
148
+
149
+ And as a reminder, the transposed conv of the above is
150
+ => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
151
+
152
+ For the backwards of conv, we now have
153
+ {D, E} [grad_out]
154
+ {A, B, C} [inp]
155
+ {i, j} [weight]
156
+
157
+ # grad_inp as conv_transpose(grad_out, weight)
158
+ Let's first compute grad_inp. To do so, we can simply look at all the
159
+ multiplications that each element of inp is involved in. For example, A is
160
+ only involved in the first element of the output (and thus only depends upon
161
+ D in grad_out), and C is only involved in the last element of the output
162
+ (and thus only depends upon E in grad_out)
163
+
164
+ {Di, Dj + Ei, Ej} [grad_inp]
165
+
166
+ Note that this corresponds to the below conv_transpose. This gives us the
167
+ output_mask[0] branch, which is grad_inp.
168
+
169
+ {D, E} [inp (grad_out)]
170
+ {i, j} [weight]
171
+ => (conv_transpose)
172
+ {Di, Dj + Ei, Ej} [out (grad_inp)]
173
+
174
+ I leave the fact that grad_inp for a transposed conv is just conv(grad_out,
175
+ weight) as an exercise for the reader.
176
+
177
+ # grad_weight as conv(inp, grad_out)
178
+ To compute grad_weight, we again look at the terms in the output, which as
179
+ a reminder is:
180
+ => {Ai + Bj, Bi + Cj} [out]
181
+ => {D, E} [grad_out]
182
+ If we manually compute the gradient for the weights, we see it's
183
+ {AD + BE, BD + CE} [grad_weight]
184
+
185
+ This corresponds to the below conv
186
+ {A, B, C} [inp]
187
+ {D, E} [weight (grad_out)]
188
+ => (conv)
189
+ {AD + BE, BD + CE} [out (grad_weight)]
190
+
191
+ # grad_weight of transposed conv as conv(grad_out, inp)
192
+ As a reminder, the terms of the output of a transposed conv are:
193
+ => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
194
+ => {D, E, F, G} [grad_out]
195
+
196
+ Manually computing the gradient for the weights, we see it's
197
+ {AD + BE + CF, AE + BF + CG} [grad_weight]
198
+
199
+ This corresponds to the below conv
200
+ {D, E, F, G} [inp (grad_out)]
201
+ {A, B, C} [weight (inp)]
202
+ => (conv)
203
+ {AD + BE + CF, AE + BF + CG} [out (grad_weight)]
204
+
205
+ For the full backwards formula, there are also some details involving
206
+ transpose of the batch/channel dimensions and groups, but I skip those for
207
+ the sake of brevity (and they're pretty similar to matmul backwards)
208
+
209
+ Check [conv backwards decomposition as conv forwards]
210
+ """
211
+ # grad_inp as conv_transpose(grad_out, weight)
212
+ if output_mask[0]:
213
+ grad_input_shape = get_shape(out_shape[0])
214
+ flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape, not transposed)
215
+
216
+ if output_mask[1]:
217
+ grad_weight_shape = get_shape(out_shape[1])
218
+ if transposed:
219
+ # grad_weight of transposed conv as conv(grad_out, inp)
220
+ flop_count += conv_flop_count(t(grad_out_shape), t(x_shape), t(grad_weight_shape), transposed=False)
221
+ else:
222
+ # grad_weight as conv(inp, grad_out)
223
+ flop_count += conv_flop_count(t(x_shape), t(grad_out_shape), t(grad_weight_shape), transposed=False)
224
+
225
+ return flop_count
226
+
227
+ def sdpa_flop_count(query_shape, key_shape, value_shape):
228
+ """
229
+ Count flops for self-attention.
230
+
231
+ NB: We can assume that value_shape == key_shape
232
+ """
233
+ b, h, s_q, d_q = query_shape
234
+ _b2, _h2, s_k, _d2 = key_shape
235
+ _b3, _h3, _s3, d_v = value_shape
236
+ assert b == _b2 == _b3 and h == _h2 == _h3 and d_q == _d2 and s_k == _s3 and d_q == _d2
237
+ total_flops = 0
238
+ # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
239
+ total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
240
+ # scores: [b, h, s_q, s_k] @ v: [b, h, s_k, d_v] -> out: [b, h, s_q, d_v]
241
+ total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v))
242
+ return total_flops
243
+
244
+
245
+ @register_flop_formula([aten._scaled_dot_product_efficient_attention, aten._scaled_dot_product_flash_attention])
246
+ def sdpa_flop(query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:
247
+ """Count flops for self-attention."""
248
+ # NB: We aren't accounting for causal attention here
249
+ return sdpa_flop_count(query_shape, key_shape, value_shape)
250
+
251
+
252
+ def sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape):
253
+ total_flops = 0
254
+ b, h, s_q, d_q = query_shape
255
+ _b2, _h2, s_k, _d2 = key_shape
256
+ _b3, _h3, _s3, d_v = value_shape
257
+ _b4, _h4, _s4, _d4 = grad_out_shape
258
+ assert b == _b2 == _b3 == _b4 and h == _h2 == _h3 == _h4 and d_q == _d2
259
+ assert d_v == _d4 and s_k == _s3 and s_q == _s4
260
+ total_flops = 0
261
+ # Step 1: We recompute the scores matrix.
262
+ # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
263
+ total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
264
+
265
+ # Step 2: We propagate the gradients through the score @ v operation.
266
+ # gradOut: [b, h, s_q, d_v] @ v: [b, h, d_v, s_k] -> gradScores: [b, h, s_q, s_k]
267
+ total_flops += bmm_flop((b * h, s_q, d_v), (b * h, d_v, s_k))
268
+ # scores: [b, h, s_k, s_q] @ gradOut: [b, h, s_q, d_v] -> gradV: [b, h, s_k, d_v]
269
+ total_flops += bmm_flop((b * h, s_k, s_q), (b * h, s_q, d_v))
270
+
271
+ # Step 3: We propagate the gradients through the k @ v operation
272
+ # gradScores: [b, h, s_q, s_k] @ k: [b, h, s_k, d_q] -> gradQ: [b, h, s_q, d_q]
273
+ total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_q))
274
+ # q: [b, h, d_q, s_q] @ gradScores: [b, h, s_q, s_k] -> gradK: [b, h, d_q, s_k]
275
+ total_flops += bmm_flop((b * h, d_q, s_q), (b * h, s_q, s_k))
276
+ return total_flops
277
+
278
+
279
+ @register_flop_formula([aten._scaled_dot_product_efficient_attention_backward, aten._scaled_dot_product_flash_attention_backward])
280
+ def sdpa_backward_flop(grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:
281
+ """Count flops for self-attention backward."""
282
+ return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
283
+
284
+ flop_registry = {
285
+ aten.mm: mm_flop,
286
+ aten.addmm: addmm_flop,
287
+ aten.bmm: bmm_flop,
288
+ aten.baddbmm: baddbmm_flop,
289
+ aten.convolution: conv_flop,
290
+ aten._convolution: conv_flop,
291
+ aten.convolution_backward: conv_backward_flop,
292
+ aten._scaled_dot_product_efficient_attention: sdpa_flop,
293
+ aten._scaled_dot_product_flash_attention: sdpa_flop,
294
+ aten._scaled_dot_product_efficient_attention_backward: sdpa_backward_flop,
295
+ aten._scaled_dot_product_flash_attention_backward: sdpa_backward_flop,
296
+ }
297
+
298
+ def normalize_tuple(x):
299
+ if not isinstance(x, tuple):
300
+ return (x,)
301
+ return x
302
+
303
+
304
+ # Define the suffixes for different orders of magnitude
305
+ suffixes = ["", "K", "M", "B", "T"]
306
+ # Thanks BingChat!
307
+ def get_suffix_str(number):
308
+ # Find the index of the appropriate suffix based on the number of digits
309
+ # with some additional overflow.
310
+ # i.e. 1.01B should be displayed as 1001M, not 1.001B
311
+ index = max(0, min(len(suffixes) - 1, (len(str(number)) - 2) // 3))
312
+ return suffixes[index]
313
+
314
+ def convert_num_with_suffix(number, suffix):
315
+ index = suffixes.index(suffix)
316
+ # Divide the number by 1000^index and format it to two decimal places
317
+ value = f"{number / 1000 ** index:.3f}"
318
+ # Return the value and the suffix as a string
319
+ return value + suffixes[index]
320
+
321
+ def convert_to_percent_str(num, denom):
322
+ if denom == 0:
323
+ return "0%"
324
+ return f"{num / denom:.2%}"
325
+
326
+ def _pytreeify_preserve_structure(f):
327
+ @wraps(f)
328
+ def nf(args):
329
+ flat_args, spec = tree_flatten(args)
330
+ out = f(*flat_args)
331
+ return tree_unflatten(out, spec)
332
+
333
+ return nf
334
+
335
+
336
+ class FlopCounterMode(TorchDispatchMode):
337
+ """
338
+ ``FlopCounterMode`` is a context manager that counts the number of flops within its context.
339
+
340
+ It does this using a ``TorchDispatchMode``.
341
+
342
+ It also supports hierarchical output by passing a module (or list of
343
+ modules) to FlopCounterMode on construction. If you do not need hierarchical
344
+ output, you do not need to use it with a module.
345
+
346
+ Example usage
347
+
348
+ .. code-block:: python
349
+
350
+ mod = ...
351
+ flop_counter = FlopCounterMode(mod)
352
+ with flop_counter:
353
+ mod.sum().backward()
354
+
355
+ """
356
+
357
+ def __init__(
358
+ self,
359
+ mods: Optional[Union[torch.nn.Module, List[torch.nn.Module]]] = None,
360
+ depth: int = 2,
361
+ display: bool = True,
362
+ custom_mapping: Optional[Dict[Any, Any]] = None):
363
+ self.flop_counts: Dict[str, Dict[Any, int]] = defaultdict(lambda: defaultdict(int))
364
+ self.depth = depth
365
+ self.parents = ["Global"]
366
+ self.in_backward = False
367
+ self.display = display
368
+ if custom_mapping is None:
369
+ custom_mapping = {}
370
+ if isinstance(mods, torch.nn.Module):
371
+ mods = [mods]
372
+ self.mods = mods
373
+ # Keys will include the modules in `mods` and their submodules
374
+ self._module_to_forward_hook_handles: Dict[nn.Module, _ForwardHookHandles] = {}
375
+ self.flop_registry = {
376
+ **flop_registry,
377
+ **{k: v if getattr(v, "_get_raw", False) else shape_wrapper(v) for k, v in custom_mapping.items()}
378
+ }
379
+
380
+ def _register_forward_hooks(self):
381
+ if self.mods is None:
382
+ return
383
+ for mod in self.mods:
384
+ prefix = type(mod).__name__
385
+ for name, module in dict(mod.named_modules()).items():
386
+ if name == "":
387
+ name = prefix
388
+ else:
389
+ name = ".".join([prefix, name])
390
+
391
+ forward_pre_hook_handle = module.register_forward_pre_hook(self._enter_module(name))
392
+ forward_hook_handle = module.register_forward_hook(self._exit_module(name))
393
+ self._module_to_forward_hook_handles[module] = _ForwardHookHandles(
394
+ forward_pre_hook_handle, forward_hook_handle
395
+ )
396
+
397
+ def _deregister_forward_hooks(self):
398
+ for forward_hook_handles in self._module_to_forward_hook_handles.values():
399
+ forward_hook_handles[0].remove()
400
+ forward_hook_handles[1].remove()
401
+ self._module_to_forward_hook_handles.clear()
402
+
403
+ def _enter_module(self, name):
404
+ def f(module, inputs):
405
+ out = _pytreeify_preserve_structure(self._create_pre_module(name))(inputs)
406
+ return out
407
+
408
+ return f
409
+
410
+ def _exit_module(self, name):
411
+ def f(module, inputs, outputs):
412
+ outputs = _pytreeify_preserve_structure(self._create_post_module(name))(outputs)
413
+ return outputs
414
+ return f
415
+
416
+ def _create_post_module(self, name):
417
+ class PushState(torch.autograd.Function):
418
+ @staticmethod
419
+ def forward(ctx, *args):
420
+ assert self.parents[-1] == name, f"{self.parents[-1]} is not {name}"
421
+ self.parents.pop()
422
+ args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args)
423
+ return args
424
+
425
+ @staticmethod
426
+ def backward(ctx, *grad_outs):
427
+ self.in_backward = True
428
+ self.parents.append(name)
429
+ return grad_outs
430
+
431
+ return PushState.apply
432
+
433
+ def _create_pre_module(self, name):
434
+ class PopState(torch.autograd.Function):
435
+ @staticmethod
436
+ def forward(ctx, *args):
437
+ if self.in_backward:
438
+ self.parents = ["Global"]
439
+ self.in_backward = True
440
+ self.parents.append(name)
441
+ args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args)
442
+ return args
443
+
444
+ @staticmethod
445
+ def backward(ctx, *grad_outs):
446
+ assert self.parents[-1] == name
447
+ self.parents.pop()
448
+ return grad_outs
449
+
450
+ return PopState.apply
451
+
452
+ def get_total_flops(self) -> int:
453
+ return sum(self.flop_counts['Global'].values())
454
+
455
+ def get_flop_counts(self) -> Dict[str, Dict[Any, int]]:
456
+ """Return the flop counts as a dictionary of dictionaries.
457
+
458
+ The outer
459
+ dictionary is keyed by module name, and the inner dictionary is keyed by
460
+ operation name.
461
+
462
+ Returns:
463
+ Dict[str, Dict[Any, int]]: The flop counts as a dictionary.
464
+ """
465
+ return {k: dict(v) for k, v in self.flop_counts.items()}
466
+
467
+ def get_table(self, depth=None):
468
+ if depth is None:
469
+ depth = self.depth
470
+ if depth is None:
471
+ depth = 999999
472
+
473
+ import tabulate
474
+ tabulate.PRESERVE_WHITESPACE = True
475
+ header = ["Module", "FLOP", "% Total"]
476
+ values = []
477
+ global_flops = self.get_total_flops()
478
+ global_suffix = get_suffix_str(global_flops)
479
+ is_global_subsumed = False
480
+
481
+ def process_mod(mod_name, depth):
482
+ nonlocal is_global_subsumed
483
+
484
+ total_flops = sum(self.flop_counts[mod_name].values())
485
+
486
+ is_global_subsumed |= total_flops >= global_flops
487
+
488
+ padding = " " * depth
489
+ values = []
490
+ values.append([
491
+ padding + mod_name,
492
+ convert_num_with_suffix(total_flops, global_suffix),
493
+ convert_to_percent_str(total_flops, global_flops)
494
+ ])
495
+ for k, v in self.flop_counts[mod_name].items():
496
+ values.append([
497
+ padding + " - " + str(k),
498
+ convert_num_with_suffix(v, global_suffix),
499
+ convert_to_percent_str(v, global_flops)
500
+ ])
501
+ return values
502
+
503
+ for mod in self.flop_counts.keys():
504
+ if mod == 'Global':
505
+ continue
506
+ mod_depth = mod.count(".") + 1
507
+ if mod_depth > depth:
508
+ continue
509
+
510
+ cur_values = process_mod(mod, mod_depth - 1)
511
+ values.extend(cur_values)
512
+
513
+ # We do a bit of messing around here to only output the "Global" value
514
+ # if there are any FLOPs in there that aren't already fully contained by
515
+ # a module.
516
+ if 'Global' in self.flop_counts and not is_global_subsumed:
517
+ for idx, value in enumerate(values):
518
+ values[idx][0] = " " + values[idx][0]
519
+
520
+ values = process_mod('Global', 0) + values
521
+
522
+ if len(values) == 0:
523
+ values = [["Global", "0", "0%"]]
524
+
525
+ return tabulate.tabulate(values, headers=header, colalign=("left", "right", "right"))
526
+
527
+ def __enter__(self):
528
+ self.flop_counts.clear()
529
+ self._register_forward_hooks()
530
+ super().__enter__()
531
+ return self
532
+
533
+ def __exit__(self, *args):
534
+ if self.display:
535
+ print(self.get_table(self.depth))
536
+ self._deregister_forward_hooks()
537
+ super().__exit__(*args)
538
+
539
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
540
+ kwargs = kwargs if kwargs else {}
541
+ out = func(*args, **kwargs)
542
+ func_packet = func._overloadpacket
543
+ if func_packet in self.flop_registry:
544
+ flop_count_func = self.flop_registry[func_packet]
545
+ flop_count = flop_count_func(*args, **kwargs, out=out) # type: ignore[operator]
546
+ if len(set(self.parents)) != len(self.parents):
547
+ print(
548
+ "The module hierarchy tracking seems to be messed up."
549
+ "Please file a bug or just run the flop counter without"
550
+ "tracking the module hierarchy (i.e. `with FlopCounterMode():`)"
551
+ )
552
+ for par in set(self.parents):
553
+ self.flop_counts[par][func_packet] += flop_count
554
+
555
+ return out
556
+
557
+ class _ForwardHookHandles(NamedTuple):
558
+ forward_pre_hook_handle: RemovableHandle
559
+ forward_hook_handle: RemovableHandle
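A sketch of `FlopCounterMode` on a small module (shapes are illustrative); the matmul inside `nn.Linear` is what the `aten.addmm`/`aten.mm` formulas above pick up:

    import torch
    from torch.utils.flop_counter import FlopCounterMode

    model = torch.nn.Linear(64, 32)
    x = torch.randn(16, 64)

    counter = FlopCounterMode(model, display=False)
    with counter:
        model(x).sum().backward()

    print(counter.get_total_flops())
    print(counter.get_table())  # hierarchical breakdown; needs the `tabulate` package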
llmeval-env/lib/python3.10/site-packages/torch/utils/hooks.py ADDED
@@ -0,0 +1,252 @@
+ import torch
+ from collections import OrderedDict
+ import weakref
+ import warnings
+ from typing import Any, Tuple
+
+ __all__ = ["RemovableHandle", "unserializable_hook", "warn_if_has_hooks", "BackwardHook"]
+
+ class RemovableHandle:
+     r"""
+     A handle which provides the capability to remove a hook.
+
+     Args:
+         hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``.
+         extra_dict (Union[dict, List[dict]]): An additional dictionary or list of
+             dictionaries whose keys will be deleted when the same keys are
+             removed from ``hooks_dict``.
+     """
+
+     id: int
+     next_id: int = 0
+
+     def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None:
+         self.hooks_dict_ref = weakref.ref(hooks_dict)
+         self.id = RemovableHandle.next_id
+         RemovableHandle.next_id += 1
+
+         self.extra_dict_ref: Tuple = ()
+         if isinstance(extra_dict, dict):
+             self.extra_dict_ref = (weakref.ref(extra_dict),)
+         elif isinstance(extra_dict, list):
+             self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict)
+
+     def remove(self) -> None:
+         hooks_dict = self.hooks_dict_ref()
+         if hooks_dict is not None and self.id in hooks_dict:
+             del hooks_dict[self.id]
+
+         for ref in self.extra_dict_ref:
+             extra_dict = ref()
+             if extra_dict is not None and self.id in extra_dict:
+                 del extra_dict[self.id]
+
+     def __getstate__(self):
+         if self.extra_dict_ref is None:
+             return (self.hooks_dict_ref(), self.id)
+         else:
+             return (self.hooks_dict_ref(), self.id, tuple(ref() for ref in self.extra_dict_ref))
+
+     def __setstate__(self, state) -> None:
+         if state[0] is None:
+             # create a dead reference
+             self.hooks_dict_ref = weakref.ref(OrderedDict())
+         else:
+             self.hooks_dict_ref = weakref.ref(state[0])
+         self.id = state[1]
+         RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1)
+
+         if len(state) < 3 or state[2] is None:
+             self.extra_dict_ref = ()
+         else:
+             self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2])
+
+     def __enter__(self) -> "RemovableHandle":
+         return self
+
+     def __exit__(self, type: Any, value: Any, tb: Any) -> None:
+         self.remove()
+
+
+ def unserializable_hook(f):
+     """
+     Mark a function as an unserializable hook with this decorator.
+
+     This suppresses warnings that would otherwise arise if you attempt
+     to serialize a tensor that has a hook.
+     """
+     f.__torch_unserializable__ = True
+     return f
+
+
+ def warn_if_has_hooks(tensor):
+     if tensor._backward_hooks:
+         for k in tensor._backward_hooks:
+             hook = tensor._backward_hooks[k]
+             if not hasattr(hook, "__torch_unserializable__"):
+                 warnings.warn(f"backward hook {repr(hook)} on tensor will not be "
+                               "serialized. If this is expected, you can "
+                               "decorate the function with @torch.utils.hooks.unserializable_hook "
+                               "to suppress this warning")
+
+ class BackwardHook:
+     """
+     A wrapper class to implement nn.Module backward hooks.
+
+     It handles:
+       - Ignoring non-Tensor inputs and replacing them by None before calling the user hook
+       - Generating the proper Node to capture a set of Tensor's gradients
+       - Linking the gradients captured for the outputs with the gradients captured for the input
+       - Calling the user hook once both output and input gradients are available
+     """
+
+     def __init__(self, module, user_hooks, user_pre_hooks):
+         self.user_hooks = user_hooks
+         self.user_pre_hooks = user_pre_hooks
+         self.module = module
+
+         self.grad_outputs = None
+         self.n_outputs = -1
+         self.output_tensors_index = None
+         self.n_inputs = -1
+         self.input_tensors_index = None
+
+     def _pack_with_none(self, indices, values, size):
+         res = [None] * size
+         for idx, val in zip(indices, values):
+             res[idx] = val
+
+         return tuple(res)
+
+     def _unpack_none(self, indices, values):
+         res = []
+         for idx in indices:
+             res.append(values[idx])
+
+         return tuple(res)
+
+     def _set_user_hook(self, grad_fn):
+         def hook(grad_input, _):
+             if self.grad_outputs is None:
+                 # This happens because the gradient in your nn.Module flows to
+                 # the Module's input without passing through the Module's
+                 # output, e.g. when you're doing double backward.
+                 return
+             res = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs)
+
+             for hook in self.user_hooks:
+                 out = hook(self.module, res, self.grad_outputs)
+
+                 if out is None:
+                     continue
+
+                 if len(out) != len(res):
+                     raise RuntimeError("Backward hook returned an invalid number of grad_input, "
+                                        f"got {len(out)}, but expected {len(res)}")
+
+                 res = out
+
+             self.grad_outputs = None
+
+             return self._unpack_none(self.input_tensors_index, res)
+
+         grad_fn.register_hook(hook)
+
+     def _apply_on_tensors(self, fn, args):
+         # Can be used to apply the given function to the tensors contained in the
+         # args. Will return updated args and the tensors indices
+         tensors_idx = []
+         tensors = []
+
+         requires_grad = False
+         for i, arg in enumerate(args):
+             if isinstance(arg, torch.Tensor):
+                 tensors_idx.append(i)
+                 tensors.append(arg)
+                 requires_grad |= arg.requires_grad
+
+         if not (requires_grad and torch.is_grad_enabled()):
+             return args, None
+
+         new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors)
+         if len(new_tensors) == 0:
+             raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.")
+
+         grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"]
+         if len(grad_fns) == 0:
+             raise RuntimeError("Error while setting up backward hooks. Please open "
+                                "an issue with a code sample to reproduce this.")
+
+         fn(grad_fns[0])
+
+         arg_list = list(args)
+         for idx, val in zip(tensors_idx, new_tensors):
+             arg_list[idx] = val
+
+         if type(args) is tuple:
+             out = tuple(arg_list)
+         else:
+             out = type(args)(*arg_list)
+         return out, tensors_idx
+
+     def setup_input_hook(self, args):
+         def fn(grad_fn):
+             self._set_user_hook(grad_fn)
+
+         res, input_idx = self._apply_on_tensors(fn, args)
+         self.n_inputs = len(args)
+         self.input_tensors_index = input_idx
+         return res
+
+     def setup_output_hook(self, args):
+         def fn(grad_fn):
+             def hook(_, grad_output):
+                 self.grad_outputs = self._pack_with_none(self.output_tensors_index,
+                                                          grad_output,
+                                                          self.n_outputs)
+
+                 if self.user_pre_hooks:
+                     expected_len = len(self.grad_outputs)
+                     for user_pre_hook in self.user_pre_hooks:
+                         hook_grad_outputs = user_pre_hook(self.module, self.grad_outputs)
+                         if hook_grad_outputs is None:
+                             continue
+
+                         actual_len = len(hook_grad_outputs)
+                         if actual_len != expected_len:
+                             raise RuntimeError("Backward pre hook returned an invalid number of grad_output, "
+                                                f"got {actual_len}, but expected {expected_len}")
+                         self.grad_outputs = hook_grad_outputs
+
+                 # We need to be able to clear self.grad_outputs but also return it
+                 local_grad_outputs = self.grad_outputs
+
+                 # Special case if no input required gradients, this hook should call the user
+                 # hook directly
+                 if self.input_tensors_index is None:
+                     grad_inputs = self._pack_with_none([], [], self.n_inputs)
+                     for user_hook in self.user_hooks:
+                         res = user_hook(self.module, grad_inputs, self.grad_outputs)
+                         if res is not None and not (isinstance(res, tuple) and all(el is None for el in res)):
+                             raise RuntimeError("Backward hook for Modules where no input requires "
+                                                "gradient should always return None or None for all gradients.")
+                     self.grad_outputs = None
+
+                 if local_grad_outputs is not None:
+                     assert self.output_tensors_index is not None  # mypy
+                     return tuple(local_grad_outputs[i] for i in self.output_tensors_index)
+
+             grad_fn.register_hook(hook)
+
+         is_tuple = True
+         if not isinstance(args, tuple):
+             args = (args,)
+             is_tuple = False
+
+         res, output_idx = self._apply_on_tensors(fn, args)
+         self.n_outputs = len(args)
+         self.output_tensors_index = output_idx
+
+         if not is_tuple:
+             res = res[0]
+         return res
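
As a quick orientation for hooks.py above: RemovableHandle is the object every nn.Module hook-registration method hands back, and BackwardHook is the internal machinery behind register_full_backward_hook. A small sketch of the expected lifecycle follows; the model and hook functions are illustrative only.

    import torch

    model = torch.nn.Linear(4, 2)

    def log_forward(module, inputs, output):
        # Ordinary forward hook: runs after module.forward.
        print(f"{module.__class__.__name__} output shape: {tuple(output.shape)}")

    def log_backward(module, grad_input, grad_output):
        # Full backward hook: BackwardHook packs non-Tensor slots with None.
        print(f"grad_output[0] shape: {tuple(grad_output[0].shape)}")

    fwd_handle = model.register_forward_hook(log_forward)        # returns RemovableHandle
    bwd_handle = model.register_full_backward_hook(log_backward)

    out = model(torch.randn(3, 4, requires_grad=True))
    out.sum().backward()

    fwd_handle.remove()   # deletes the entry from the module's hook dict
    bwd_handle.remove()

    # RemovableHandle is also a context manager: the hook is removed on __exit__.
    with model.register_forward_hook(log_forward):
        model(torch.randn(3, 4))
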
llmeval-env/lib/python3.10/site-packages/torch/utils/mkldnn.py ADDED
@@ -0,0 +1,233 @@
+ import torch
+
+
+ class MkldnnLinear(torch.jit.ScriptModule):
+     def __init__(self, dense_module, dtype):
+         super().__init__()
+         self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))
+         if dense_module.bias is not None:
+             # Bias can be fp32 or bf16 for the OneDNN bf16 path, but for good accuracy
+             # we use fp32 dtype.
+             self.register_buffer('bias', dense_module.bias.to_mkldnn())
+         else:
+             # TODO: Remove this once ScriptModule supports registering None buffer
+             self.register_buffer(
+                 'bias',
+                 torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
+
+     @torch.jit.script_method
+     def __getstate__(self):
+         return (self.weight.to_dense(), self.bias.to_dense(), self.training)
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = state[0].to_mkldnn()
+         self.bias = state[1].to_mkldnn()
+         self.training = state[2]
+
+     @torch.jit.script_method
+     def forward(self, x):
+         x_mkldnn = x if x.is_mkldnn else x.to_mkldnn()
+         y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias)
+         y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense()
+         return y
+
+
+ class _MkldnnConvNd(torch.jit.ScriptModule):
+     """Common base of MkldnnConv1d and MkldnnConv2d."""
+
+     __constants__ = ['stride', 'padding', 'dilation', 'groups']
+
+     def __init__(self, dense_module):
+         super().__init__()
+
+         self.stride = dense_module.stride
+         self.padding = dense_module.padding
+         self.dilation = dense_module.dilation
+         self.groups = dense_module.groups
+
+         if dense_module.bias is not None:
+             self.register_buffer('bias', dense_module.bias.to_mkldnn())
+         else:
+             # Bias can be fp32 or bf16 for the OneDNN bf16 path, but for good accuracy
+             # we use fp32 dtype.
+             # TODO: Remove this once ScriptModule supports registering None buffer
+             self.register_buffer(
+                 'bias',
+                 torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())
+
+     @torch.jit.script_method
+     def __getstate__(self):
+         return (self.weight.to_dense(), self.bias.to_dense(), self.training)
+
+     @torch.jit.script_method
+     def forward(self, x):
+         return torch.mkldnn_convolution(
+             x,
+             self.weight,
+             self.bias,
+             self.padding,
+             self.stride,
+             self.dilation,
+             self.groups)
+
+
+ class MkldnnConv1d(_MkldnnConvNd):
+     def __init__(self, dense_module, dtype):
+         super().__init__(dense_module)
+
+         self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = state[0].to_mkldnn()
+         self.bias = state[1].to_mkldnn()
+         self.training = state[2]
+
+
+ class MkldnnConv2d(_MkldnnConvNd):
+     def __init__(self, dense_module, dtype):
+         super().__init__(dense_module)
+
+         self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight(
+             dense_module.weight.to_mkldnn(dtype),
+             self.padding,
+             self.stride,
+             self.dilation,
+             self.groups))
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight(
+             state[0].to_mkldnn(),
+             self.padding,
+             self.stride,
+             self.dilation,
+             self.groups)
+         self.bias = state[1].to_mkldnn()
+         self.training = state[2]
+
+ class MkldnnConv3d(_MkldnnConvNd):
+     def __init__(self, dense_module, dtype):
+         super().__init__(dense_module)
+
+         self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv3d_weight(
+             dense_module.weight.to_mkldnn(dtype),
+             self.padding,
+             self.stride,
+             self.dilation,
+             self.groups))
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = torch._C._nn.mkldnn_reorder_conv3d_weight(
+             state[0].to_mkldnn(),
+             self.padding,
+             self.stride,
+             self.dilation,
+             self.groups)
+         self.bias = state[1].to_mkldnn()
+         self.training = state[2]
+
+
+ class MkldnnBatchNorm(torch.jit.ScriptModule):
+     __constants__ = ['exponential_average_factor', 'eps']
+
+     def __init__(self, dense_module):
+         super().__init__()
+
+         assert not dense_module.training
+         assert dense_module.track_running_stats
+         assert dense_module.affine
+
+         if dense_module.momentum is None:
+             self.exponential_average_factor = 0.0
+         else:
+             self.exponential_average_factor = dense_module.momentum
+         self.eps = dense_module.eps
+
+         self.register_buffer('weight', dense_module.weight.to_mkldnn())
+         self.register_buffer('bias', dense_module.bias.to_mkldnn())
+         self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn())
+         self.register_buffer('running_var', dense_module.running_var.to_mkldnn())
+
+     @torch.jit.script_method
+     def __getstate__(self):
+         weight = self.weight.to_dense()
+         bias = self.bias.to_dense()
+         running_mean = self.running_mean.to_dense()
+         running_var = self.running_var.to_dense()
+         return (weight, bias, running_mean, running_var, self.training)
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = state[0].to_mkldnn()
+         self.bias = state[1].to_mkldnn()
+         self.running_mean = state[2].to_mkldnn()
+         self.running_var = state[3].to_mkldnn()
+         self.training = state[4]
+
+     @torch.jit.script_method
+     def forward(self, x):
+         return torch.batch_norm(
+             x,
+             self.weight,
+             self.bias,
+             self.running_mean,
+             self.running_var,
+             False,  # training
+             self.exponential_average_factor,
+             self.eps,
+             False,  # cuda_enabled
+         )
+
+ class MkldnnPrelu(torch.jit.ScriptModule):
+     def __init__(self, dense_module, dtype):
+         super().__init__()
+         self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))
+
+     @torch.jit.script_method
+     def __getstate__(self):
+         return (self.weight.to_dense(), self.training)
+
+     @torch.jit.script_method
+     def __setstate__(self, state):
+         self.weight = state[0].to_mkldnn()
+         self.training = state[1]
+
+     @torch.jit.script_method
+     def forward(self, x):
+         x_mkldnn = x if x.is_mkldnn else x.to_mkldnn()
+         y_mkldnn = torch.prelu(x_mkldnn, self.weight)
+         y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense()
+         return y
+
+ def to_mkldnn(module, dtype=torch.float):
+     assert dtype in [torch.float, torch.bfloat16, torch.half], \
+         "MKLDNN only supports the float, bfloat16, and half paths for now"
+
+     def m_fn(m, d):
+         if isinstance(m, torch.nn.Linear):
+             return MkldnnLinear(m, d)
+         elif isinstance(m, torch.nn.Conv1d):
+             return MkldnnConv1d(m, d)
+         elif isinstance(m, torch.nn.Conv2d):
+             return MkldnnConv2d(m, d)
+         elif isinstance(m, torch.nn.Conv3d):
+             return MkldnnConv3d(m, d)
+         elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
+             # For the batchnorm bf16 path, OneDNN requires fp32 weight and bias,
+             # so no dtype argument is needed here.
+             return MkldnnBatchNorm(m)
+         elif isinstance(m, torch.nn.PReLU):
+             return MkldnnPrelu(m, d)
+         else:
+             return m
+
+     def m_fn_rec(m, d):
+         new_m = m_fn(m, d)
+         for name, sub_m in m.named_children():
+             setattr(new_m, name, m_fn_rec(sub_m, d))
+         return new_m
+
+     return m_fn_rec(module, dtype)
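
Finally, a rough sketch of how the to_mkldnn helper above is typically applied to an inference-only model. The toy network, input shape, and the explicit to_mkldnn()/to_dense() layout conversions around the call are assumptions for illustration (MkldnnLinear and MkldnnPrelu convert dense inputs themselves, but MKL-DNN op coverage otherwise depends on the build), so treat this as a sketch rather than a guaranteed recipe.

    import torch
    from torch.utils.mkldnn import to_mkldnn

    # MkldnnBatchNorm asserts eval mode, affine weights and tracked running stats,
    # so the model must be put in eval() before conversion.
    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
        torch.nn.BatchNorm2d(16),
    ).eval()

    with torch.no_grad():
        mkldnn_model = to_mkldnn(model)                # default dtype=torch.float
        x = torch.randn(1, 3, 32, 32).to_mkldnn()      # convert the input layout explicitly
        y = mkldnn_model(x).to_dense()                 # back to a strided tensor

Unsupported layers are returned unchanged by m_fn, so mixed models still run; only Linear, Conv1d/2d/3d, BatchNorm2d/3d and PReLU get MKL-DNN replacements.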