applied-ai-018 committed
Commit 74ee0e3 · verified · 1 Parent(s): 9e86264

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50):
  1. ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/exp_avg.pt +3 -0
  8. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/torch/compiler/__init__.py +193 -0
  19. venv/lib/python3.10/site-packages/torch/compiler/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/torch/cuda/_memory_viz.py +626 -0
  21. venv/lib/python3.10/site-packages/torch/cuda/_sanitizer.py +622 -0
  22. venv/lib/python3.10/site-packages/torch/cuda/_utils.py +38 -0
  23. venv/lib/python3.10/site-packages/torch/cuda/comm.py +18 -0
  24. venv/lib/python3.10/site-packages/torch/cuda/error.py +0 -0
  25. venv/lib/python3.10/site-packages/torch/cuda/graphs.py +479 -0
  26. venv/lib/python3.10/site-packages/torch/cuda/nccl.py +137 -0
  27. venv/lib/python3.10/site-packages/torch/cuda/nvtx.py +91 -0
  28. venv/lib/python3.10/site-packages/torch/cuda/random.py +179 -0
  29. venv/lib/python3.10/site-packages/torch/cuda/sparse.py +1 -0
  30. venv/lib/python3.10/site-packages/torch/cuda/streams.py +241 -0
  31. venv/lib/python3.10/site-packages/torch/func/__init__.py +13 -0
  32. venv/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h +143 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h +6 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h +36 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h +9 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h +25 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h +19 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h +30 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h +1101 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h +10 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h +71 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h +104 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h +32 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h +29 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h +56 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h +288 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h +210 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h +763 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h +64 -0
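
The ckpts/universal/global_step20/zero/*.pt entries above are git-LFS-tracked optimizer-state tensors (Adam exp_avg / exp_avg_sq moments and fp32 master weights) from what looks like a DeepSpeed-style universal checkpoint. As a minimal sketch, assuming each file is a plain torch.save payload holding a single tensor, one of them could be inspected like this (illustrative snippet, not part of the commit):

import torch

# Path taken from the file list above; loading on CPU avoids needing a GPU.
path = "ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt"
state = torch.load(path, map_location="cpu")
print(type(state), getattr(state, "shape", None), getattr(state, "dtype", None))
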
ckpts/universal/global_step20/zero/11.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:861f15e14dc04a7e84926a0caf18337fbeffc2cc167d4fca9610d9619899c947
+ size 50332828
ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbea918a3e344b6f7a0bca5e70e073611db7bfb91059b0dad28a81a952efe42d
+ size 33555612
ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba24375d7fb0adf8833aaf79ba2a69690d2c66b2dfc257ac9151c0e677850f04
+ size 33555627
ckpts/universal/global_step20/zero/13.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74c4ebd27d77d87abca885dff7031a2273359dd3198082127d479a3f2b0365ca
+ size 33555533
ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e952efb7ae2c3eadd6e8d013ea7b6d6100c9393b15e218af3b81e4dd887df627
+ size 33555627
ckpts/universal/global_step20/zero/17.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b24e005bcf8f9f0e9c553d14c2008883967df3630bed2f8a2c199c91873289c0
+ size 33555533
ckpts/universal/global_step20/zero/23.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10cd68d418628369abdc6c49febd60c7d206d80850697acda63430f930e3e1cf
+ size 9372
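
Each of the seven checkpoint diffs above is a git-LFS pointer file rather than the tensor data itself: a version line, a sha256 oid, and the blob size in bytes. A minimal sketch, assuming the pointers follow the standard git-lfs spec shown above, for parsing a pointer and verifying a separately downloaded blob against its oid (paths are illustrative):

import hashlib

def read_lfs_pointer(pointer_path):
    # Parse "key value" lines, e.g. version / oid sha256:<hex> / size <bytes>.
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(blob_path, pointer):
    # Compare the blob's sha256 digest with the hex part of the oid field.
    expected = pointer["oid"].split(":", 1)[1]
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected
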
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc ADDED
Binary file (5.3 kB)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc ADDED
Binary file (1.23 kB)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc ADDED
Binary file (818 Bytes)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc ADDED
Binary file (946 Bytes)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc ADDED
Binary file (1.03 kB)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc ADDED
Binary file (7.22 kB)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc ADDED
Binary file (647 Bytes)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (986 Bytes)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc ADDED
Binary file (732 Bytes)
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc ADDED
Binary file (409 Bytes)
venv/lib/python3.10/site-packages/torch/compiler/__init__.py ADDED
@@ -0,0 +1,193 @@
+ import torch
+ from typing import List
+
+ __all__ = [
+     "compile",
+     "assume_constant_result",
+     "reset",
+     "allow_in_graph",
+     "list_backends",
+     "disable",
+     "cudagraph_mark_step_begin",
+     "wrap_numpy",
+     "is_compiling",
+     "is_dynamo_compiling",
+ ]
+
+ def compile(*args, **kwargs):
+     """
+     See :func:`torch.compile` for details on the arguments for this function.
+     """
+     return torch.compile(*args, **kwargs)
+
+ def reset() -> None:
+     """
+     This function clears all compilation caches and restores the system to its initial state.
+     It is recommended to call this function, especially after using operations like `torch.compile(...)`
+     to ensure a clean state before another unrelated compilation
+     """
+     import torch._dynamo
+
+     torch._dynamo.reset()
+
+ def allow_in_graph(fn):
+     """
+     Customize which functions compilation will include in the generated graph.
+     It bypasses all introspection of the symbolic python code in favor of
+     directly writing it to the graph.
+     If fn is a list or tuple of callables it recursively applies :func:`allow_in_graph()`
+     to each function and returns a new list or tuple containing the modified functions
+
+     Args:
+         fn: A callable representing the function to be included in the graph.
+
+     .. warning::
+
+         :func:`allow_in_graph` skips TorchDynamo completely on the decorated function
+         skipping all TorchDynamo safety checks (graph breaks, handling closures, etc).
+         Therefore, one has to be very careful with :func:`allow_in_graph` since subsystems
+         like AOT Autograd rely on torchdynamo
+         If not careful, this could lead to soundness and really hard-to-debug issues.
+
+     """
+     import torch._dynamo
+
+     return torch._dynamo.allow_in_graph(fn)
+
+
+ def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
+     """
+     Return valid strings that can be passed to `torch.compile(..., backend="name")`.
+
+     Args:
+         exclude_tags(optional): A tuple of strings representing tags to exclude.
+     """
+     import torch._dynamo
+
+     return torch._dynamo.list_backends(exclude_tags)
+
+ def assume_constant_result(fn):
+     """
+     This function is used to mark a function `fn` as having a constant result.
+     This allows the compiler to optimize away your function
+     Returns The same function `fn`
+
+     Args:
+         fn: The function to be marked as having a constant result.
+
+     .. warning::
+         `assume_constant_result` can if invalid cause safety and soundness issues, :func:`torch.compile`
+         will not attempt to validate whether the constant assumption is true or not
+
+     """
+     import torch._dynamo
+
+     return torch._dynamo.assume_constant_result(fn)
+
+ def disable(fn=None, recursive=True):
+     """
+     This function provides both a decorator and a context manager to disable compilation on a function
+     It also provides the option of recursively disabling called functions
+
+     Args:
+         fn (optional): The function to disable
+         recursive (optional): A boolean value indicating whether the disabling should be recursive.
+     """
+     import torch._dynamo
+
+     return torch._dynamo.disable(fn, recursive)
+
+ def cudagraph_mark_step_begin():
+     """
+     Indicates that a new iteration of inference or training is about to begin.
+
+     CUDA Graphs will free tensors of a prior iteration. A new iteration is started on each invocation of
+     torch.compile, so long as there is not a pending backward that has not been called.
+
+     If that heuristic is wrong, such as in the following example, manually mark it with this api.
+
+     .. code-block:: python
+
+         @torch.compile(mode="reduce-overhead")
+         def rand_foo():
+             return torch.rand([4], device="cuda")
+
+         for _ in range(5):
+             torch.compiler.cudagraph_mark_step_begin()
+             rand_foo() + rand_foo()
+
+     For more details, see `torch.compiler_cudagraph_trees <https://pytorch.org/docs/main/torch.compiler_cudagraph_trees.html>`__
+     """
+     from torch._inductor import cudagraph_trees
+
+     cudagraph_trees.mark_step_begin()
+
+ def wrap_numpy(fn):
+     r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function
+     from ``torch.Tensor``s to ``torch.Tensor``s.
+
+     It is designed to be used with :func:`torch.compile` with ``fullgraph=True``. It allows to
+     compile a NumPy function as if it were a PyTorch function. This allows you to run NumPy code
+     on CUDA or compute its gradients.
+
+     .. note::
+
+         This decorator does not work without :func:`torch.compile`.
+
+     Example::
+
+         >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+         >>> # Compile a NumPy function as a Tensor -> Tensor function
+         >>> @torch.compile(fullgraph=True)
+         >>> @torch.compiler.wrap_numpy
+         >>> def fn(a: np.ndarray):
+         >>>     return np.sum(a * a)
+         >>> # Execute the NumPy function using Tensors on CUDA and compute the gradients
+         >>> x = torch.arange(6, dtype=torch.float32, device="cuda", requires_grad=True)
+         >>> out = fn(x)
+         >>> out.backward()
+         >>> print(x.grad)
+         tensor([ 0., 2., 4., 6., 8., 10.], device='cuda:0')
+     """
+     from torch._dynamo.external_utils import wrap_numpy as wrap
+     return wrap(fn)
+
+ _is_compiling_flag: bool = False
+
+ def is_compiling() -> bool:
+     """
+     Indicates whether a graph is executed/traced as part of torch.compile() or torch.export().
+
+     Note that there are 2 other related flags that should deprecated eventually:
+       * torch._dynamo.external_utils.is_compiling()
+       * torch._utils.is_compiling()
+
+     Example::
+
+         >>> def forward(self, x):
+         >>>     if not torch.compiler.is_compiling():
+         >>>         ...logic that is not needed in a compiled/traced graph...
+         >>>
+         >>>     ...rest of the function...
+     """
+     if torch.jit.is_scripting():
+         return False
+     else:
+         return _is_compiling_flag
+
+ def is_dynamo_compiling() -> bool:
+     """
+     Indicates whether a graph is traced via TorchDynamo.
+
+     It's stricter than is_compiling() flag, as it would only be set to True when
+     TorchDynamo is used.
+
+     Example::
+
+         >>> def forward(self, x):
+         >>>     if not torch.compiler.is_dynamo_compiling():
+         >>>         ...logic that is not needed in a TorchDynamo-traced graph...
+         >>>
+         >>>     ...rest of the function...
+     """
+     return False
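
The module above is a thin public wrapper around torch._dynamo internals. As a usage sketch (assuming a PyTorch build where the torch.compiler namespace is available, i.e. 2.1 or later), the wrappers compose like this:

import torch

def layer(x):
    # is_compiling() lets eager-only side effects be skipped while tracing.
    if not torch.compiler.is_compiling():
        print("running eagerly")
    return torch.relu(x * 2)

compiled = torch.compile(layer)   # equivalent to torch.compiler.compile(layer)
y = compiled(torch.randn(4))
torch.compiler.reset()            # drop compilation caches between experiments
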
venv/lib/python3.10/site-packages/torch/compiler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.99 kB)
venv/lib/python3.10/site-packages/torch/cuda/_memory_viz.py ADDED
@@ -0,0 +1,626 @@
1
+ import pickle
2
+ import sys
3
+ import os
4
+ import io
5
+ import subprocess
6
+ import json
7
+ from functools import lru_cache
8
+ from typing import Any
9
+ from itertools import groupby
10
+ import base64
11
+ import warnings
12
+
13
+ cache = lru_cache(None)
14
+
15
+ __all__ = ["format_flamegraph", "segments", "memory", "compare"]
16
+
17
+ def _frame_fmt(f, full_filename=False):
18
+ i = f['line']
19
+ fname = f['filename']
20
+ if not full_filename:
21
+ fname = fname.split('/')[-1]
22
+ func = f['name']
23
+ return f'{fname}:{i}:{func}'
24
+
25
+ @cache
26
+ def _frame_filter(name, filename):
27
+ omit_functions = [
28
+ "unwind::unwind",
29
+ "CapturedTraceback::gather",
30
+ "gather_with_cpp",
31
+ "_start",
32
+ "__libc_start_main",
33
+ "PyEval_",
34
+ "PyObject_",
35
+ "PyFunction_",
36
+ ]
37
+ omit_filenames = [
38
+ "core/boxing",
39
+ "/Register",
40
+ "/Redispatch",
41
+ "pythonrun.c",
42
+ "Modules/main.c",
43
+ "Objects/call.c",
44
+ "Objects/methodobject.c",
45
+ "pycore_ceval.h",
46
+ "ceval.c",
47
+ "cpython/abstract.h",
48
+ ]
49
+ for of in omit_functions:
50
+ if of in name:
51
+ return False
52
+ for of in omit_filenames:
53
+ if of in filename:
54
+ return False
55
+ return True
56
+
57
+ def _frames_fmt(frames, full_filename=False, reverse=False):
58
+ if reverse:
59
+ frames = reversed(frames)
60
+ return [_frame_fmt(f, full_filename) for f in frames if _frame_filter(f['name'], f['filename'])]
61
+
62
+ def _block_extra_legacy(b):
63
+ if 'history' in b:
64
+ frames = b['history'][0].get('frames', [])
65
+ real_size = b['history'][0]['real_size']
66
+ else:
67
+ real_size = b.get('requested_size', b['size'])
68
+ frames = []
69
+ return frames, real_size
70
+
71
+ def _block_extra(b):
72
+ if 'frames' not in b:
73
+ # old snapshot format made it more complicated to get frames/allocated size
74
+ return _block_extra_legacy(b)
75
+ return b['frames'], b['requested_size']
76
+
77
+ def format_flamegraph(flamegraph_lines, flamegraph_script=None):
78
+ if flamegraph_script is None:
79
+ flamegraph_script = f'/tmp/{os.getuid()}_flamegraph.pl'
80
+ if not os.path.exists(flamegraph_script):
81
+ import urllib.request
82
+ print(f"Downloading flamegraph.pl to: {flamegraph_script}")
83
+ urllib.request.urlretrieve(
84
+ 'https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl', flamegraph_script)
85
+ subprocess.check_call(['chmod', '+x', flamegraph_script])
86
+ args = [flamegraph_script, '--countname', 'bytes']
87
+ p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8')
88
+ assert p.stdin is not None
89
+ assert p.stdout is not None
90
+ p.stdin.write(flamegraph_lines)
91
+ p.stdin.close()
92
+ result = p.stdout.read()
93
+ p.stdout.close()
94
+ p.wait()
95
+ assert p.wait() == 0
96
+ return result
97
+
98
+ def _write_blocks(f, prefix, blocks):
99
+ def frames_fragment(frames):
100
+ if not frames:
101
+ return "<non-python>"
102
+ return ';'.join(_frames_fmt(frames, reverse=True))
103
+ for b in blocks:
104
+ if 'history' not in b:
105
+ frames, accounted_for_size = _block_extra(b)
106
+ f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {accounted_for_size}\n')
107
+ else:
108
+ accounted_for_size = 0
109
+ for h in b['history']:
110
+ sz = h['real_size']
111
+ accounted_for_size += sz
112
+ if 'frames' in h:
113
+ frames = h['frames']
114
+ f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {sz}\n')
115
+ else:
116
+ f.write(f'{prefix};{b["state"]};<no-context> {sz}\n')
117
+ gaps = b['size'] - accounted_for_size
118
+ if gaps:
119
+ f.write(f'{prefix};{b["state"]};<gaps> {gaps}\n')
120
+
121
+ def segments(snapshot, format_flamegraph=format_flamegraph):
122
+ f = io.StringIO()
123
+ for seg in snapshot['segments']:
124
+ prefix = f'stream_{seg["stream"]};seg_{seg["address"]}'
125
+ _write_blocks(f, prefix, seg['blocks'])
126
+ return format_flamegraph(f.getvalue())
127
+
128
+ def memory(snapshot, format_flamegraph=format_flamegraph):
129
+ f = io.StringIO()
130
+ for seg in snapshot['segments']:
131
+ prefix = f'stream_{seg["stream"]}'
132
+ _write_blocks(f, prefix, seg['blocks'])
133
+ return format_flamegraph(f.getvalue())
134
+
135
+ def compare(before, after, format_flamegraph=format_flamegraph):
136
+ def _seg_key(seg):
137
+ return (seg['address'], seg['total_size'])
138
+
139
+ def _seg_info(seg):
140
+ return f'stream_{seg["stream"]};seg_{seg["address"]}'
141
+
142
+ f = io.StringIO()
143
+
144
+ before_segs = {_seg_key(seg) for seg in before}
145
+ after_segs = {_seg_key(seg) for seg in after}
146
+
147
+ print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}')
148
+ print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}')
149
+
150
+ for seg in before:
151
+ if _seg_key(seg) not in after_segs:
152
+ _write_blocks(f, f'only_before;{_seg_info(seg)}', seg['blocks'])
153
+
154
+ for seg in after:
155
+ if _seg_key(seg) not in before_segs:
156
+ _write_blocks(f, f'only_after;{_seg_info(seg)}', seg['blocks'])
157
+
158
+ return format_flamegraph(f.getvalue())
159
+
160
+ def _format_size(num):
161
+ # https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size
162
+ for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
163
+ if abs(num) < 1024.0:
164
+ return f"{num:3.1f}{unit}B"
165
+ num /= 1024.0
166
+ return f"{num:.1f}YiB"
167
+
168
+ class Bytes:
169
+ def __init__(self, value):
170
+ self.value = value
171
+
172
+ def __add__(self, rhs):
173
+ return Bytes(self.value + rhs)
174
+
175
+ def __repr__(self):
176
+ return _format_size(self.value)
177
+
178
+ def calc_active(seg):
179
+ return sum(b['size'] for b in seg['blocks'] if b['state'] == 'active_allocated')
180
+
181
+ def _report_free(free_external, free_internal):
182
+ total = free_external + free_internal
183
+ suffix = ''
184
+ if total != 0:
185
+ pct = (free_internal / total) * 100
186
+ suffix = f' ({pct:.1f}% internal)'
187
+ return f'{Bytes(total)}{suffix}'
188
+
189
+ PAGE_SIZE = 1024 * 1024 * 20
190
+ legend = f"""\
191
+
192
+ Legend:
193
+ [a ] - a segment in the allocator
194
+ ^-- a page {Bytes(PAGE_SIZE)} of memory in the segment
195
+ a-z: pages filled with a single block's content
196
+ ' ': page is completely free
197
+ *: page if completely full with multiple blocks
198
+ 0-9: page is partially full with tensors of multiple blocks (9 == 90% full)
199
+ (X% internal) - of the free memory, X% is free because we rounded the size of the allocation.
200
+ """
201
+
202
+ def segsum(data):
203
+ r"""Visually reports how the allocator has filled its segments.
204
+
205
+ This printout can help debug fragmentation issues since free fragments
206
+ will appear as gaps in this printout. The amount of free space is reported
207
+ for each segment.
208
+ We distinguish between internal free memory which occurs because the
209
+ allocator rounds the allocation size, and external free memory, which are
210
+ the gaps between allocations in a segment.
211
+ Args:
212
+ data: snapshot dictionary created from _snapshot()
213
+ """
214
+ segments = []
215
+ out = io.StringIO()
216
+ out.write(f"Summary of segments >= {Bytes(PAGE_SIZE)} in size\n")
217
+ total_reserved = 0
218
+ total_allocated = 0
219
+ free_external = 0
220
+ free_internal = 0
221
+ for seg in sorted(data['segments'], key=lambda x: (x['total_size'], calc_active(x))):
222
+ total_reserved += seg['total_size']
223
+
224
+ seg_free_external = 0
225
+ seg_free_internal = 0
226
+ seg_allocated = 0
227
+ all_ranges = []
228
+ boffset = 0
229
+ for b in seg['blocks']:
230
+ active = b['state'] == 'active_allocated'
231
+ if active:
232
+ _, allocated_size = _block_extra(b)
233
+ all_ranges.append((boffset, allocated_size, True))
234
+ seg_allocated += allocated_size
235
+ seg_free_internal += b['size'] - allocated_size
236
+ else:
237
+ seg_free_external += b['size']
238
+
239
+ boffset += b['size']
240
+
241
+ total_allocated += seg_allocated
242
+ free_external += seg_free_external
243
+ free_internal += seg_free_internal
244
+
245
+ nseg = (seg['total_size'] - 1) // PAGE_SIZE + 1
246
+ occupied = [' ' for _ in range(nseg)]
247
+ frac = [0.0 for _ in range(nseg)]
248
+ active_size = 0
249
+ for i, (start_, size, active) in enumerate(all_ranges):
250
+ active_size += size
251
+ finish_ = (start_ + size)
252
+ start = start_ // PAGE_SIZE
253
+ finish = (finish_ - 1) // PAGE_SIZE + 1
254
+ m = chr(ord('a' if active else 'A') + (i % 26))
255
+ for j in range(start, finish):
256
+ s = max(start_, j * PAGE_SIZE)
257
+ e = min(finish_, (j + 1) * PAGE_SIZE)
258
+ frac[j] += (e - s) / PAGE_SIZE
259
+ if occupied[j] != ' ':
260
+ occupied[j] = '0123456789*'[int(frac[j] * 10)]
261
+ else:
262
+ occupied[j] = m
263
+ stream = '' if seg['stream'] == 0 else f', stream_{seg["stream"]}'
264
+ body = ''.join(occupied)
265
+ assert seg_free_external + seg_free_internal + seg_allocated == seg['total_size']
266
+ stream = f' stream_{seg["stream"]}' if seg['stream'] != 0 else ''
267
+ if seg['total_size'] >= PAGE_SIZE:
268
+ out.write(f'[{body}] {Bytes(seg["total_size"])} allocated, '
269
+ f'{_report_free(seg_free_external, seg_free_internal)} free{stream}\n')
270
+ out.write(f'segments: {len(data["segments"])}\n')
271
+ out.write(f'total_reserved: {Bytes(total_reserved)}\n')
272
+ out.write(f'total_allocated: {Bytes(total_allocated)}\n')
273
+ internal_external = f' ({Bytes(free_internal)} internal + {Bytes(free_external)} external)' if free_internal else ''
274
+ out.write(f'total_free: {_report_free(free_external, free_internal)}\n')
275
+ out.write(legend)
276
+ assert free_internal + free_external + total_allocated == total_reserved
277
+ return out.getvalue()
278
+
279
+ def trace(data):
280
+ out = io.StringIO()
281
+
282
+ def format(entries):
283
+ segment_intervals : list = []
284
+ segment_addr_to_name = {}
285
+ allocation_addr_to_name = {}
286
+
287
+ free_names : list = []
288
+ next_name = 0
289
+
290
+ def _name():
291
+ nonlocal next_name
292
+ if free_names:
293
+ return free_names.pop()
294
+ r, m = next_name // 26, next_name % 26
295
+ next_name += 1
296
+ return f'{chr(ord("a") + m)}{"" if r == 0 else r}'
297
+
298
+ def find_segment(addr):
299
+ for name, saddr, size in segment_intervals:
300
+ if addr >= saddr and addr < saddr + size:
301
+ return name, saddr
302
+ for i, seg in enumerate(data['segments']):
303
+ saddr = seg['address']
304
+ size = seg['allocated_size']
305
+ if addr >= saddr and addr < saddr + size:
306
+ return f'seg_{i}', saddr
307
+ return None, None
308
+ count = 0
309
+ out.write(f'{len(entries)} entries\n')
310
+
311
+
312
+ total_reserved = 0
313
+ for seg in data['segments']:
314
+ total_reserved += seg['total_size']
315
+
316
+ for count, e in enumerate(entries):
317
+ if e['action'] == 'alloc':
318
+ addr, size = e['addr'], e['size']
319
+ n = _name()
320
+ seg_name, seg_addr = find_segment(addr)
321
+ if seg_name is None:
322
+ seg_name = "MEM"
323
+ offset = addr
324
+ else:
325
+ offset = addr - seg_addr
326
+ out.write(f'{n} = {seg_name}[{offset}:{Bytes(size)}]\n')
327
+ allocation_addr_to_name[addr] = (n, size, count)
328
+ count += size
329
+ elif e['action'] == 'free_requested':
330
+ addr, size = e['addr'], e['size']
331
+ name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
332
+ out.write(f'del {name} # {Bytes(size)}\n')
333
+ elif e['action'] == 'free_completed':
334
+ addr, size = e['addr'], e['size']
335
+ count -= size
336
+ name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
337
+ out.write(f'# free completed for {name} {Bytes(size)}\n')
338
+ if name in allocation_addr_to_name:
339
+ free_names.append(name)
340
+ del allocation_addr_to_name[name]
341
+ elif e['action'] == 'segment_alloc':
342
+ addr, size = e['addr'], e['size']
343
+ name = _name()
344
+ out.write(f'{name} = cudaMalloc({addr}, {Bytes(size)})\n')
345
+ segment_intervals.append((name, addr, size))
346
+ segment_addr_to_name[addr] = name
347
+ elif e['action'] == 'segment_free':
348
+ addr, size = e['addr'], e['size']
349
+ name = segment_addr_to_name.get(addr, addr)
350
+ out.write(f'cudaFree({name}) # {Bytes(size)}\n')
351
+ if name in segment_addr_to_name:
352
+ free_names.append(name)
353
+ del segment_addr_to_name[name]
354
+ elif e['action'] == 'oom':
355
+ size = e['size']
356
+ free = e['device_free']
357
+ out.write(f'raise OutOfMemoryError() # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n')
358
+ else:
359
+ out.write(f'{e}\n')
360
+ out.write(f"TOTAL MEM: {Bytes(count)}")
361
+ for i, d in enumerate(data['device_traces']):
362
+ if d:
363
+ out.write(f'Device {i} ----------------\n')
364
+ format(d)
365
+ return out.getvalue()
366
+
367
+
368
+ _memory_viz_template = r"""
369
+ <!DOCTYPE html>
370
+ <html>
371
+ <head>
372
+ </head>
373
+ <body>
374
+ <script type="module">
375
+ import {add_local_files} from "https://cdn.jsdelivr.net/gh/pytorch/pytorch@main/torch/utils/viz/MemoryViz.js"
376
+ const local_files = $SNAPSHOT
377
+ add_local_files(local_files, $VIZ_KIND)
378
+ </script>
379
+ </body>
380
+ """
381
+
382
+ def _format_viz(data, viz_kind, device):
383
+ if device is not None:
384
+ warnings.warn('device argument is deprecated, plots now contain all device')
385
+ buffer = pickle.dumps(data)
386
+ buffer += b'\x00' * (3 - len(buffer) % 3)
387
+ # Encode the buffer with base64
388
+ encoded_buffer = base64.b64encode(buffer).decode('utf-8')
389
+
390
+ json_format = json.dumps([{"name": 'snapshot.pickle', "base64": encoded_buffer}])
391
+ return _memory_viz_template.replace('$VIZ_KIND', repr(viz_kind)) \
392
+ .replace('$SNAPSHOT', json_format)
393
+
394
+ def trace_plot(data, device=None, plot_segments=False):
395
+ """Generate a visualization over time of the memory usage recorded by the trace as an html file.
396
+
397
+ Args:
398
+ data: Memory snapshot as generated from torch.cuda.memory._snapshot()
399
+ device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
400
+ plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations.
401
+ Defaults to False.
402
+
403
+ Returns:
404
+ str: HTML of visualization
405
+ """
406
+ return _format_viz(data, 'Active Memory Timeline' if not plot_segments else 'Active Cached Memory Timeline', device)
407
+
408
+
409
+ def _profile_to_snapshot(profile):
410
+ import torch
411
+ from torch.profiler._memory_profiler import Action, TensorKey
412
+ from torch._C._profiler import _EventType
413
+ memory_profile = profile._memory_profile()
414
+
415
+ allocation_stacks = {}
416
+ for event in memory_profile._op_tree.sorted_nodes:
417
+ if event.tag == _EventType.Allocation:
418
+ parent = event.parent
419
+ python_parents = []
420
+ while parent:
421
+ if parent.tag in (_EventType.PyCall, _EventType.PyCCall):
422
+ python_parents.append(parent)
423
+ parent = parent.parent
424
+ key = TensorKey.from_allocation(event.extra_fields)
425
+
426
+ # Corner case: If allocation doesn't have an ID (can't prove it was used as a Tensor)
427
+ # key will be None. I should add some way to identify these, I just haven't yet.
428
+ if key and event.extra_fields.alloc_size > 0:
429
+ allocation_stacks[key] = python_parents
430
+
431
+
432
+ device_count = torch.cuda.device_count()
433
+ snapshot = {
434
+ 'device_traces': [[] for _ in range(device_count + 1)],
435
+ 'segments': [{'device': device,
436
+ 'address': None,
437
+ 'total_size': 0,
438
+ 'stream': 0,
439
+ 'blocks': []} for device in range(device_count + 1)]
440
+ }
441
+
442
+ def to_device(device):
443
+ if device.type == 'cuda':
444
+ return device.index
445
+ else:
446
+ return device_count
447
+
448
+ def allocate(size, tensor_key, version, during_trace=True):
449
+ device = to_device(tensor_key.device)
450
+ addr = tensor_key.storage.ptr
451
+
452
+ seg = snapshot['segments'][device] # type: ignore[index]
453
+ if seg['address'] is None or seg['address'] > addr:
454
+ seg['address'] = addr
455
+ seg['total_size'] = max(seg['total_size'], addr + size) # record max addr for now, we will make it the size later
456
+ category = memory_profile._categories.get(tensor_key, version)
457
+ category = category.name.lower() if category is not None else "unknown"
458
+ stack = allocation_stacks.get(tensor_key, ())
459
+ stack = [{'filename': 'none', 'line': 0, 'name': p.name} for p in stack]
460
+ r = {'action': 'alloc', 'addr': addr, 'size': size, 'stream': 0, 'frames': stack, 'category': category}
461
+ if during_trace:
462
+ snapshot['device_traces'][device].append(r) # type: ignore[index]
463
+ return r
464
+
465
+ def free(alloc, device):
466
+ for e in ('free_requested', 'free_completed'):
467
+ snapshot['device_traces'][device].append({'action': e, # type: ignore[index]
468
+ 'addr': alloc['addr'],
469
+ 'size': alloc['size'],
470
+ 'stream': 0,
471
+ 'frames': alloc['frames']})
472
+
473
+ kv_to_elem = {}
474
+
475
+
476
+
477
+ # create the device trace
478
+ for time, action, (tensor_key, version), size in memory_profile.timeline:
479
+ if not isinstance(tensor_key, TensorKey):
480
+ continue
481
+ if action == Action.CREATE:
482
+ kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version)
483
+ elif action == Action.DESTROY:
484
+ free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
485
+ elif action == Action.INCREMENT_VERSION:
486
+ free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
487
+ kv_to_elem[(tensor_key, version + 1)] = allocate(size, tensor_key, version + 1)
488
+ elif action == Action.PREEXISTING:
489
+ kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version, during_trace=False)
490
+
491
+
492
+ # create the final snapshot state
493
+ blocks_at_end = [(to_device(tensor_key.device), event['addr'], event['size'], event['frames'])
494
+ for (tensor_key, version), event in kv_to_elem.items()]
495
+ for device, blocks in groupby(sorted(blocks_at_end), key=lambda x: x[0]):
496
+ seg = snapshot['segments'][device] # type: ignore[index]
497
+ last_addr = seg['address']
498
+ for _, addr, size, frames in blocks:
499
+ if last_addr < addr:
500
+ seg['blocks'].append({'size': addr - last_addr, 'state': 'inactive'})
501
+ seg['blocks'].append({'size': size, 'state': 'active_allocated', 'requested_size': size, 'frames': frames})
502
+ last_addr = addr + size
503
+ if last_addr < seg['total_size']:
504
+ seg['blocks'].append({'size': seg['total_size'] - last_addr, 'state': 'inactive'})
505
+
506
+ snapshot['segments'] = [seg for seg in snapshot['segments'] if seg['blocks']] # type: ignore[attr-defined]
507
+ for seg in snapshot['segments']: # type: ignore[attr-defined, name-defined, no-redef]
508
+ seg['total_size'] -= seg['address']
509
+ if not seg['blocks']:
510
+ seg['blocks'].append({'size': seg['total_size'], 'state': 'inactive'})
511
+
512
+ return snapshot
513
+
514
+ def profile_plot(profile, device=None):
515
+ """Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file.
516
+
517
+ Args:
518
+ profile: profile as generated by `torch.profiler.profile(profile_memory=True)`
519
+ device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
520
+
521
+ Returns:
522
+ str: HTML of visualization
523
+ """
524
+ snapshot = _profile_to_snapshot(profile)
525
+ return _format_viz(snapshot, 'Active Memory Timeline', device)
526
+
527
+
528
+ def segment_plot(data: Any, device=None):
529
+ return _format_viz(data, 'Allocator State History', device)
530
+
531
+ if __name__ == "__main__":
532
+ import os.path
533
+ thedir = os.path.realpath(os.path.dirname(__file__))
534
+ if thedir in sys.path:
535
+ # otherwise we find cuda/random.py as random...
536
+ sys.path.remove(thedir)
537
+ import argparse
538
+
539
+ fn_name = 'torch.cuda.memory._snapshot()'
540
+ pickled = f'pickled memory statistics from {fn_name}'
541
+ parser = argparse.ArgumentParser(description=f'Visualize memory dumps produced by {fn_name}')
542
+
543
+ subparsers = parser.add_subparsers(dest='action')
544
+
545
+ def _output(p):
546
+ p.add_argument('-o', '--output', default='output.svg', help='flamegraph svg (default: output.svg)')
547
+
548
+ description = 'Prints overall allocation statistics and a visualization of how the allocators segments are currently filled.'
549
+ stats_a = subparsers.add_parser('stats', description=description)
550
+ stats_a.add_argument('input', help=pickled)
551
+
552
+ description = 'Prints buffer of the most recent allocation events embedded in the snapshot in a Pythonic style.'
553
+ trace_a = subparsers.add_parser('trace', description=description)
554
+ trace_a.add_argument('input', help=pickled)
555
+
556
+ description = 'Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)'
557
+ segments_a = subparsers.add_parser('segments', description=description)
558
+ segments_a.add_argument('input', help=pickled)
559
+ _output(segments_a)
560
+
561
+ description = "Generate a flamegraph the program locations contributing to CUDA memory usage."
562
+ memory_a = subparsers.add_parser('memory', description=description)
563
+ memory_a.add_argument('input', help=pickled)
564
+ _output(memory_a)
565
+
566
+ description = 'Generate a flamegraph that shows segments (aka blocks) that have been added ' \
567
+ 'or removed between two different memorys snapshots.'
568
+ compare_a = subparsers.add_parser('compare', description=description)
569
+ compare_a.add_argument('before', help=pickled)
570
+ compare_a.add_argument('after', help=pickled)
571
+ _output(compare_a)
572
+
573
+ plots = (
574
+ ("trace_plot", "Generate a visualization over time of the memory usage recorded by the trace as an html file."),
575
+ ("segment_plot", "Visualize how allocations are packed into allocator segments at each point in a trace as an html file.")
576
+ )
577
+ for cmd, description in plots:
578
+ trace_plot_a = subparsers.add_parser(cmd, description=description)
579
+ trace_plot_a.add_argument('input', help=pickled)
580
+ help = 'visualize trace from this device (default: chooses the only device with trace info or errors)'
581
+ trace_plot_a.add_argument('-d', '--device', type=int, default=None, help=help)
582
+ help = 'path to save the visualization(default: output.html)'
583
+ trace_plot_a.add_argument('-o', '--output', default='output.html', help=help)
584
+ if cmd == "trace_plot":
585
+ help = 'visualize change to segments rather than individual allocations'
586
+ trace_plot_a.add_argument('-s', '--segments', action='store_true', help=help)
587
+
588
+
589
+ args = parser.parse_args()
590
+
591
+ def _read(name):
592
+ if name == '-':
593
+ f = sys.stdin.buffer
594
+ else:
595
+ f = open(name, 'rb')
596
+ data = pickle.load(f)
597
+ if isinstance(data, list): # segments only...
598
+ data = {'segments': data, 'traces': []}
599
+ return data
600
+
601
+ def _write(name, data):
602
+ with open(name, 'w') as f:
603
+ f.write(data)
604
+
605
+ if args.action == 'segments':
606
+ data = _read(args.input)
607
+ _write(args.output, segments(data))
608
+ elif args.action == 'memory':
609
+ data = _read(args.input)
610
+ _write(args.output, memory(data))
611
+ elif args.action == 'stats':
612
+ data = _read(args.input)
613
+ print(segsum(data))
614
+ elif args.action == 'trace':
615
+ data = _read(args.input)
616
+ print(trace(data))
617
+ elif args.action == 'compare':
618
+ before = _read(args.before)
619
+ after = _read(args.after)
620
+ _write(args.output, compare(before, after))
621
+ elif args.action == 'trace_plot':
622
+ data = _read(args.input)
623
+ _write(args.output, trace_plot(data, device=args.device, plot_segments=args.segments))
624
+ elif args.action == 'segment_plot':
625
+ data = _read(args.input)
626
+ _write(args.output, segment_plot(data, device=args.device))
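
_memory_viz.py turns CUDA caching-allocator snapshots into flamegraphs and HTML timelines, either through its CLI subcommands or by calling its functions directly. A sketch of the direct route, assuming the private snapshot APIs (torch.cuda.memory._record_memory_history / _snapshot) behave as in recent releases and a CUDA device is present:

import pickle
import torch
from torch.cuda import _memory_viz as viz

torch.cuda.memory._record_memory_history(max_entries=100000)  # start recording allocations
tensors = [torch.randn(1024, 1024, device="cuda") for _ in range(8)]
snapshot = torch.cuda.memory._snapshot()

with open("snapshot.pickle", "wb") as f:   # same file the CLI subcommands accept
    pickle.dump(snapshot, f)

html = viz.trace_plot(snapshot)            # or: python _memory_viz.py trace_plot snapshot.pickle
with open("memory_timeline.html", "w") as f:
    f.write(html)
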
venv/lib/python3.10/site-packages/torch/cuda/_sanitizer.py ADDED
@@ -0,0 +1,622 @@
1
+ r"""
2
+ This module introduces CUDA Sanitizer, a tool for detecting synchronization errors between kernels ran on different streams.
3
+
4
+ It stores information on accesses to tensors to determine if they are synchronized
5
+ or not. When enabled in a python program and a possible data race is detected, a
6
+ detailed warning will be printed and the program will exit.
7
+
8
+ It can be enabled either by importing this module and calling
9
+ :func:`enable_cuda_sanitizer()` or by exporting the ``TORCH_CUDA_SANITIZER``
10
+ environment variable.
11
+ """
12
+
13
+ import enum
14
+ import functools
15
+ import inspect
16
+ import io
17
+ import logging
18
+ import sys
19
+ import textwrap
20
+ import traceback
21
+ from dataclasses import dataclass, field
22
+ from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, TypeVar
23
+
24
+ import torch
25
+ import torch.utils._cuda_trace as cuda_trace
26
+ from torch.utils import _pytree as pytree
27
+ from torch.utils._python_dispatch import TorchDispatchMode
28
+
29
+
30
+ DEFAULT_STREAM_ID = 0
31
+
32
+ TK = TypeVar("TK")
33
+ TVa = TypeVar("TVa")
34
+ TVb = TypeVar("TVb")
35
+
36
+ DataPtr = int
37
+ StreamId = int
38
+ EventId = int
39
+ SeqNum = int
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
+ class AccessType(enum.Enum):
45
+ READ = enum.auto()
46
+ WRITE = enum.auto()
47
+
48
+ def __str__(self):
49
+ return "reading from" if self is AccessType.READ else "writing to"
50
+
51
+
52
+ @dataclass
53
+ class Access:
54
+ r"""Stores information about a single access to a tensor by a kernel.
55
+
56
+ Args:
57
+ type: either AccessType.READ or AccessType.Write.
58
+ seq_num: the sequential number of the kernel performing the access.
59
+ stream: the stream id of the stream executing the kernel.
60
+ operator: the schema of the launched kernel, which lists the
61
+ arguments and return type.
62
+ aliases: the arguments in the schema this access corresponds to.
63
+ is_output: Whether the tensor was an output of the kernel.
64
+ stack_trace: the stack summary object captured during access.
65
+ """
66
+
67
+ type: AccessType
68
+ seq_num: SeqNum
69
+ stream: StreamId
70
+ operator: str
71
+ aliases: List[str]
72
+ is_output: bool
73
+ stack_trace: traceback.StackSummary
74
+
75
+
76
+ class SynchronizationError(Exception):
77
+ """Base class for errors detected by CUDA Sanitizer."""
78
+
79
+ pass
80
+
81
+
82
+ class UnsynchronizedAccessError(SynchronizationError):
83
+ """Stores information about two unsynchronized accesses to one data pointer."""
84
+
85
+ def __init__(
86
+ self,
87
+ data_ptr: DataPtr,
88
+ allocation_stack_trace: Optional[traceback.StackSummary],
89
+ current_access: Access,
90
+ previous_access: Access,
91
+ ):
92
+ self.data_ptr = data_ptr
93
+ self.allocation_stack_trace = allocation_stack_trace
94
+ self.current_access = current_access
95
+ self.previous_access = previous_access
96
+
97
+ def __str__(self):
98
+ def format_access(access: Access):
99
+ message.write(f"{access.operator}\n{access.type}")
100
+ if access.aliases:
101
+ message.write(" argument(s) " + ", ".join(access.aliases))
102
+ if access.is_output:
103
+ message.write(", and to")
104
+ if access.is_output:
105
+ message.write(" the output")
106
+ message.write(
107
+ f"\nWith stack trace:\n{''.join(access.stack_trace.format())}\n"
108
+ )
109
+
110
+ with io.StringIO() as message:
111
+ message.write(
112
+ textwrap.dedent(
113
+ f"""\
114
+ ============================
115
+ CSAN detected a possible data race on tensor with data pointer {self.data_ptr}
116
+ Access by stream {self.current_access.stream} during kernel:
117
+ """
118
+ )
119
+ )
120
+ format_access(self.current_access)
121
+
122
+ message.write(
123
+ f"Previous access by stream {self.previous_access.stream} during kernel:\n"
124
+ )
125
+ format_access(self.previous_access)
126
+
127
+ if self.allocation_stack_trace:
128
+ message.write(
129
+ "Tensor was allocated with stack trace:\n"
130
+ f"{''.join(self.allocation_stack_trace.format())}"
131
+ )
132
+ else:
133
+ message.write("Trace for tensor allocation not found.")
134
+ return message.getvalue()
135
+
136
+
137
+ class CUDASanitizerErrors(Exception):
138
+ """Wrapper class for errors reported by CUDA Sanitizer."""
139
+
140
+ def __init__(self, errors: List[SynchronizationError]):
141
+ self.errors = errors
142
+
143
+ def __str__(self):
144
+ return f"detected {len(self.errors)} errors"
145
+
146
+
147
+ @dataclass
148
+ class TensorInfo:
149
+ r"""Stores information about a single tensor and recent accesses to it.
150
+
151
+ Args:
152
+ allocation_stack_trace: the stack summary object captured during tensor
153
+ allocation. Can be ``None`` if the allocation wasn't caught by CSAN.
154
+ reads: list of read accesses to the tensor that were performed since
155
+ the last write.
156
+ write: the last write access to the tensor.
157
+ """
158
+
159
+ allocation_stack_trace: Optional[traceback.StackSummary]
160
+ reads: List[Access] = field(default_factory=list)
161
+ write: Optional[Access] = None
162
+
163
+
164
+ class _TensorsAccessed:
165
+ def __init__(self):
166
+ self.accesses: Dict[DataPtr, TensorInfo] = {}
167
+
168
+ def ensure_tensor_exists(self, data_ptr: DataPtr) -> None:
169
+ if data_ptr not in self.accesses:
170
+ logger.info(
171
+ "Found tensor with pointer: %s, but no matching tensor "
172
+ "allocation in the trace. Backfilling the trace now. "
173
+ "Perhaps the sanitizer was enabled after some torch operations?",
174
+ data_ptr,
175
+ )
176
+ self.create_tensor(data_ptr, None)
177
+
178
+ def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None:
179
+ if data_ptr in self.accesses:
180
+ logger.info(
181
+ "Found duplicate tensor allocation in the trace for tensor with "
182
+ "pointer: %s. Assuming the trace for tensor deallocation "
183
+ "wasn't caught and backfilling it now. "
184
+ "Perhaps the sanitizer was enabled after some torch operations?",
185
+ data_ptr,
186
+ )
187
+ self.delete_tensor(data_ptr)
188
+
189
+ def create_tensor(
190
+ self, data_ptr: DataPtr, stack_trace: Optional[traceback.StackSummary]
191
+ ) -> None:
192
+ self.accesses[data_ptr] = TensorInfo(stack_trace)
193
+
194
+ def delete_tensor(self, data_ptr: DataPtr) -> None:
195
+ del self.accesses[data_ptr]
196
+
197
+ def were_there_reads_since_last_write(self, data_ptr: DataPtr) -> bool:
198
+ return True if self.accesses[data_ptr].reads else False
199
+
200
+ def get_allocation_stack_trace(
201
+ self, data_ptr: DataPtr
202
+ ) -> Optional[traceback.StackSummary]:
203
+ return self.accesses[data_ptr].allocation_stack_trace
204
+
205
+ def get_write(self, data_ptr: DataPtr) -> Optional[Access]:
206
+ return self.accesses[data_ptr].write
207
+
208
+ def get_reads(self, data_ptr: DataPtr) -> List[Access]:
209
+ return self.accesses[data_ptr].reads
210
+
211
+ def add_read(self, data_ptr: DataPtr, access: Access) -> None:
212
+ self.accesses[data_ptr].reads.append(access)
213
+
214
+ def set_write(self, data_ptr: DataPtr, access: Access) -> None:
215
+ self.accesses[data_ptr].write = access
216
+ self.accesses[data_ptr].reads = []
217
+
218
+
219
+ class StreamSynchronizations:
220
+ def __init__(self):
221
+ self.current_sync_states: Dict[StreamId, Dict[StreamId, SeqNum]] = {}
222
+ self.recorded_sync_states: Dict[EventId, Dict[StreamId, SeqNum]] = {}
223
+ self.host_sync_state: Dict[StreamId, SeqNum] = {}
224
+ self.create_stream(DEFAULT_STREAM_ID)
225
+
226
+ def _ensure_stream_exists(self, stream: StreamId) -> None:
227
+ if stream not in self.current_sync_states:
228
+ logger.info(
229
+ "Found Stream with id: %s, but no matching stream "
230
+ "creation in the trace. Backfilling the trace now. "
231
+ "Perhaps the sanitizer was enabled after some torch operations?",
232
+ stream,
233
+ )
234
+ self.create_stream(stream)
235
+
236
+ def _ensure_event_exists(self, event: EventId) -> None:
237
+ if event not in self.recorded_sync_states:
238
+ logger.info(
239
+ "Found Event with id: %s, but no matching event "
240
+ "creation in the trace. Backfilling the trace now. "
241
+ "Perhaps the sanitizer was enabled after some torch operations?",
242
+ event,
243
+ )
244
+ self.create_event(event)
245
+
246
+ def _ensure_event_does_not_exist(self, event: EventId) -> None:
247
+ if event in self.recorded_sync_states:
248
+ logger.info(
249
+ "Found duplicate event creation in the trace for event with "
250
+ "id: %s. Assuming the trace for event deletion wasn't caught "
251
+ "and backfilling it now. "
252
+ "Perhaps the sanitizer was enabled after some torch operations?",
253
+ event,
254
+ )
255
+ self.delete_event(event)
256
+
257
+ def create_stream(self, stream: StreamId) -> None:
258
+ if stream in self.current_sync_states:
259
+ logger.info(
260
+ "Found duplicate Stream creation in the trace for Stream with "
261
+ "id: %s. PyTorch Streams are only created once, so this "
262
+ "trace entry is ignored.",
263
+ stream,
264
+ )
265
+ else:
266
+ self.host_sync_state[stream] = 0
267
+ self.current_sync_states[stream] = self.host_sync_state.copy()
268
+
269
+ def create_event(self, event: EventId) -> None:
270
+ self._ensure_event_does_not_exist(event)
271
+ self.recorded_sync_states[event] = {}
272
+
273
+ def delete_event(self, event: EventId) -> None:
274
+ self._ensure_event_exists(event)
275
+ del self.recorded_sync_states[event]
276
+
277
+ def update_seq_num(self, stream: StreamId, seq_num: SeqNum) -> None:
278
+ self._ensure_stream_exists(stream)
279
+ self.current_sync_states[stream][stream] = seq_num
280
+
281
+ def record_state(self, event: EventId, stream: StreamId) -> None:
282
+ self._ensure_event_exists(event)
283
+ self._ensure_stream_exists(stream)
284
+ self.recorded_sync_states[event] = self.current_sync_states[stream].copy()
285
+
286
+ def _state_wait_for_other(
287
+ self, state: Dict[StreamId, SeqNum], other: Dict[StreamId, SeqNum]
288
+ ) -> None:
289
+ for stream, seq_num in other.items():
290
+ state[stream] = max(state.get(stream, -1), seq_num)
291
+
292
+ def stream_wait_for_event(self, stream: StreamId, event: EventId) -> None:
293
+ self._ensure_stream_exists(stream)
294
+ self._ensure_event_exists(event)
295
+ self._state_wait_for_other(
296
+ self.current_sync_states[stream], self.recorded_sync_states[event]
297
+ )
298
+
299
+ def all_streams_wait_for_event(self, event: EventId) -> None:
300
+ self._ensure_event_exists(event)
301
+ for stream in self.current_sync_states.keys():
302
+ self.stream_wait_for_event(stream, event)
303
+
304
+ self._state_wait_for_other(
305
+ self.host_sync_state, self.recorded_sync_states[event]
306
+ )
307
+
308
+ def all_streams_wait_for_stream(self, stream: StreamId) -> None:
309
+ self._ensure_stream_exists(stream)
310
+ for state in self.current_sync_states.values():
311
+ self._state_wait_for_other(state, self.current_sync_states[stream])
312
+
313
+ self._state_wait_for_other(
314
+ self.host_sync_state, self.current_sync_states[stream]
315
+ )
316
+
317
+ def sync_all_streams(self) -> None:
318
+ for stream, state in self.current_sync_states.items():
319
+ self.host_sync_state[stream] = state[stream]
320
+
321
+ for state in self.current_sync_states.values():
322
+ self._state_wait_for_other(state, self.host_sync_state)
323
+
324
+ def is_ordered_after(
325
+ self, current_stream: StreamId, seq_num: SeqNum, other_stream: StreamId
326
+ ) -> bool:
327
+ self._ensure_stream_exists(current_stream)
328
+ self._ensure_stream_exists(other_stream)
329
+ return seq_num <= self.current_sync_states[current_stream].get(other_stream, -1)
330
+
331
+
332
+ class EventHandler:
333
+ """Analyzes CSAN trace for synchronization errors.
334
+
335
+ Stores information on each stream's synchronizations with other streams as well
336
+ as tensor accesses to determine whether a given kernel launch might cause a
337
+ data race.
338
+ """
339
+
340
+ def __init__(self):
341
+ self.tensors_accessed = _TensorsAccessed()
342
+ self.syncs = StreamSynchronizations()
343
+ self.seq_num: SeqNum = 0
344
+
345
+ def _handle_kernel_launch(
346
+ self,
347
+ stream: StreamId,
348
+ read_only: Set[DataPtr],
349
+ read_write: Set[DataPtr],
350
+ outputs: Set[DataPtr],
351
+ operator: str,
352
+ tensor_aliases: Dict[int, List[str]],
353
+ ) -> List[SynchronizationError]:
354
+ def check_conflict(
355
+ data_ptr: DataPtr, current_access: Access, previous_access: Optional[Access]
356
+ ) -> None:
357
+ if previous_access is None:
358
+ return
359
+ if not self.syncs.is_ordered_after(
360
+ current_access.stream, previous_access.seq_num, previous_access.stream
361
+ ):
362
+ error_list.append(
363
+ UnsynchronizedAccessError(
364
+ data_ptr,
365
+ self.tensors_accessed.get_allocation_stack_trace(data_ptr),
366
+ current_access,
367
+ previous_access,
368
+ )
369
+ )
370
+
371
+ error_list: List[SynchronizationError] = []
372
+ self.seq_num += 1
373
+ self.syncs.update_seq_num(stream, self.seq_num)
374
+ stack_trace = traceback.StackSummary.extract(
375
+ traceback.walk_stack(inspect.currentframe()), lookup_lines=False
376
+ )
377
+ # The stack trace generated in this way is in the inverse order, so it must be
378
+ # reversed.
379
+ stack_trace.reverse()
380
+
381
+ for data_ptr in read_only:
382
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
383
+ current_access = Access(
384
+ AccessType.READ,
385
+ self.seq_num,
386
+ stream,
387
+ operator,
388
+ tensor_aliases[data_ptr],
389
+ data_ptr in outputs,
390
+ stack_trace,
391
+ )
392
+ check_conflict(
393
+ data_ptr, current_access, self.tensors_accessed.get_write(data_ptr)
394
+ )
395
+ self.tensors_accessed.add_read(data_ptr, current_access)
396
+
397
+ for data_ptr in read_write:
398
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
399
+ current_access = Access(
400
+ AccessType.WRITE,
401
+ self.seq_num,
402
+ stream,
403
+ operator,
404
+ tensor_aliases[data_ptr],
405
+ data_ptr in outputs,
406
+ stack_trace,
407
+ )
408
+ if self.tensors_accessed.were_there_reads_since_last_write(data_ptr):
409
+ for previous_access in self.tensors_accessed.get_reads(data_ptr):
410
+ check_conflict(data_ptr, current_access, previous_access)
411
+ else:
412
+ check_conflict(
413
+ data_ptr, current_access, self.tensors_accessed.get_write(data_ptr)
414
+ )
415
+ self.tensors_accessed.set_write(data_ptr, current_access)
416
+
417
+ return error_list
418
+
419
+ def _handle_event_creation(self, event: EventId) -> None:
420
+ self.syncs.create_event(event)
421
+
422
+ def _handle_event_deletion(self, event: EventId) -> None:
423
+ self.syncs.delete_event(event)
424
+
425
+ def _handle_event_record(self, event: EventId, stream: StreamId) -> None:
426
+ self.syncs.record_state(event, stream)
427
+
428
+ def _handle_event_wait(self, event: EventId, stream: StreamId) -> None:
429
+ self.syncs.stream_wait_for_event(stream, event)
430
+
431
+ def _handle_memory_allocation(self, data_ptr: DataPtr) -> None:
432
+ self.tensors_accessed.ensure_tensor_does_not_exist(data_ptr)
433
+ stack_trace = traceback.StackSummary.extract(
434
+ traceback.walk_stack(inspect.currentframe()), lookup_lines=False
435
+ )
436
+ # The stack trace generated in this way is in the inverse order, so it must be
437
+ # reversed.
438
+ stack_trace.reverse()
439
+ self.tensors_accessed.create_tensor(
440
+ data_ptr,
441
+ stack_trace,
442
+ )
443
+
444
+ def _handle_memory_deallocation(self, data_ptr: DataPtr) -> None:
445
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
446
+ self.tensors_accessed.delete_tensor(data_ptr)
447
+
448
+ def _handle_stream_creation(self, stream: StreamId) -> None:
449
+ self.syncs.create_stream(stream)
450
+
451
+ def _handle_device_synchronization(self) -> None:
452
+ self.syncs.sync_all_streams()
453
+
454
+ def _handle_stream_synchronization(self, stream: StreamId) -> None:
455
+ self.syncs.all_streams_wait_for_stream(stream)
456
+
457
+ def _handle_event_synchronization(self, event: EventId) -> None:
458
+ self.syncs.all_streams_wait_for_event(event)
459
+
460
+
461
+ def zip_by_key(a: Dict[TK, TVa], b: Dict[TK, TVb]) -> Iterator[Tuple[TK, TVa, TVb]]:
462
+ for arg, value in a.items():
463
+ if arg in b:
464
+ yield arg, value, b[arg]
465
+
466
+
467
+ def zip_arguments(
468
+ schema: torch.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any]
469
+ ) -> Iterator[Tuple[torch.Argument, Any]]:
470
+ schema_args = schema.arguments[: len(args)]
471
+ schema_kwargs = {arg.name: arg for arg in schema.arguments[len(args) :]}
472
+
473
+ yield from zip(schema_args, args)
474
+
475
+ for _, argument, value in zip_by_key(schema_kwargs, kwargs):
476
+ yield (argument, value)
477
+
478
+
479
+ class ArgumentHandler:
480
+ def __init__(self):
481
+ self.dataptrs_read: Set[DataPtr] = set()
482
+ self.dataptrs_written: Set[DataPtr] = set()
483
+ self.tensor_aliases: Dict[DataPtr, List[str]] = dict()
484
+ self.outputs: Set[DataPtr] = set()
485
+
486
+ def _handle_argument(
487
+ self,
488
+ value: Any,
489
+ is_write: bool,
490
+ name: Optional[str] = None,
491
+ is_output: bool = False,
492
+ ) -> None:
493
+ if isinstance(value, torch.Tensor) and value.is_cuda:
494
+ data_ptr = value.data_ptr()
495
+ if is_write:
496
+ self.dataptrs_written.add(data_ptr)
497
+ else:
498
+ self.dataptrs_read.add(data_ptr)
499
+
500
+ self.tensor_aliases.setdefault(data_ptr, [])
501
+ if name is not None:
502
+ self.tensor_aliases[data_ptr].append(name)
503
+ if is_output:
504
+ self.outputs.add(data_ptr)
505
+
506
+ def parse_inputs(
507
+ self,
508
+ schema: torch.FunctionSchema,
509
+ args: Tuple[Any, ...],
510
+ kwargs: Dict[str, Any],
511
+ ) -> None:
512
+ for argument, value in zip_arguments(schema, args, kwargs):
513
+ is_write = argument.alias_info is not None and argument.alias_info.is_write
514
+ pytree.tree_map_(
515
+ functools.partial(
516
+ self._handle_argument, is_write=is_write, name=argument.name
517
+ ),
518
+ value,
519
+ )
520
+
521
+ def parse_outputs(self, outputs: Any) -> None:
522
+ pytree.tree_map_(
523
+ functools.partial(self._handle_argument, is_write=True, is_output=True),
524
+ outputs,
525
+ )
526
+
527
+
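
For context, a hedged, self-contained sketch of the schema inspection that parse_inputs above relies on: an argument's alias_info.is_write flag is what marks it as mutated by the operator. The choice of aten.add_.Tensor is purely illustrative.

    import torch

    # In-place add: the `self` argument carries alias_info with is_write=True,
    # while `other` and `alpha` are read-only.
    schema = torch.ops.aten.add_.Tensor._schema
    for arg in schema.arguments:
        is_write = arg.alias_info is not None and arg.alias_info.is_write
        print(arg.name, "write" if is_write else "read")
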
528
+ class CUDASanitizerDispatchMode(TorchDispatchMode):
529
+ def __init__(self):
530
+ self.event_handler = EventHandler()
531
+ torch._C._activate_cuda_trace()
532
+ cuda_trace.register_callback_for_cuda_event_creation(
533
+ self.event_handler._handle_event_creation
534
+ )
535
+ cuda_trace.register_callback_for_cuda_event_deletion(
536
+ self.event_handler._handle_event_deletion
537
+ )
538
+ cuda_trace.register_callback_for_cuda_event_record(
539
+ self.event_handler._handle_event_record
540
+ )
541
+ cuda_trace.register_callback_for_cuda_event_wait(
542
+ self.event_handler._handle_event_wait
543
+ )
544
+ cuda_trace.register_callback_for_cuda_memory_allocation(
545
+ self.event_handler._handle_memory_allocation
546
+ )
547
+ cuda_trace.register_callback_for_cuda_memory_deallocation(
548
+ self.event_handler._handle_memory_deallocation
549
+ )
550
+ cuda_trace.register_callback_for_cuda_stream_creation(
551
+ self.event_handler._handle_stream_creation
552
+ )
553
+ cuda_trace.register_callback_for_cuda_device_synchronization(
554
+ self.event_handler._handle_device_synchronization
555
+ )
556
+ cuda_trace.register_callback_for_cuda_stream_synchronization(
557
+ self.event_handler._handle_stream_synchronization
558
+ )
559
+ cuda_trace.register_callback_for_cuda_event_synchronization(
560
+ self.event_handler._handle_event_synchronization
561
+ )
562
+
563
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
564
+ if kwargs is None:
565
+ kwargs = {}
566
+
567
+ argument_handler = ArgumentHandler()
568
+ argument_handler.parse_inputs(func._schema, args, kwargs)
569
+
570
+ outputs = func(*args, **kwargs)
571
+
572
+ argument_handler.parse_outputs(outputs)
573
+ errors = self.event_handler._handle_kernel_launch(
574
+ torch.cuda.current_stream().cuda_stream,
575
+ argument_handler.dataptrs_read - argument_handler.dataptrs_written,
576
+ argument_handler.dataptrs_written,
577
+ argument_handler.outputs,
578
+ func._schema,
579
+ argument_handler.tensor_aliases,
580
+ )
581
+ if errors:
582
+ for error in errors:
583
+ print(error, file=sys.stderr)
584
+ raise CUDASanitizerErrors(errors)
585
+
586
+ return outputs
587
+
588
+
589
+ class CUDASanitizer:
590
+ """Manages the lifetime of a CUDASanitizer dispatch mode object.
591
+
592
+ The CUDASanitizer class wraps the entering/exiting functions of the dispatch mode
593
+ context manager in the enable function/destructor, respectively. This is to
594
+ explicitly set the lifetime of the dispatch mode object to that of the application.
595
+ This approach was deemed more elegant than using the atexit module.
596
+ """
597
+
598
+ def __init__(self):
599
+ self.dispatch = CUDASanitizerDispatchMode()
600
+ self.enabled = False
601
+
602
+ def enable(self):
603
+ self.dispatch.__enter__()
604
+ self.enabled = True
605
+
606
+ def __del__(self):
607
+ if self.enabled:
608
+ self.dispatch.__exit__(None, None, None)
609
+
610
+
611
+ def enable_cuda_sanitizer():
612
+ """Enable CUDA Sanitizer.
613
+
614
+ The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions
615
+ for synchronization errors. All data races found will be printed to the standard
616
+ error output along with stack traces of suspected causes. For best results, the
617
+ sanitizer should be enabled at the very beginning of the program.
618
+ """
619
+ cuda_sanitizer.enable()
620
+
621
+
622
+ cuda_sanitizer = CUDASanitizer()
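
A minimal usage sketch for the sanitizer defined above, assuming a CUDA build: launching a kernel on a side stream without synchronizing against the default stream, where the tensor was produced, is the kind of pattern it is designed to flag.

    import torch
    import torch.cuda._sanitizer as csan

    csan.enable_cuda_sanitizer()

    a = torch.rand(4, 2, device="cuda")   # filled on the default stream
    with torch.cuda.stream(torch.cuda.Stream()):
        # No event/stream synchronization: the dispatch mode should report an
        # UnsynchronizedAccessError and raise CUDASanitizerErrors here.
        torch.mul(a, 5, out=a)
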
venv/lib/python3.10/site-packages/torch/cuda/_utils.py ADDED
@@ -0,0 +1,38 @@
1
+ from typing import Any
2
+
3
+ import torch
4
+
5
+ # _get_device_index has been moved to torch._utils._get_device_index
6
+ from torch._utils import _get_device_index as _torch_get_device_index
7
+
8
+
9
+ def _get_device_index(
10
+ device: Any, optional: bool = False, allow_cpu: bool = False
11
+ ) -> int:
12
+ r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
13
+
14
+ If :attr:`device` is a torch.device object, returns the device index if it
15
+ is a CUDA device. Note that for a CUDA device without a specified index,
16
+ i.e., ``torch.device('cuda')``, this will return the current default CUDA
17
+ device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
18
+ CPU devices will be accepted and ``-1`` will be returned in this case.
19
+
20
+ If :attr:`device` is a Python integer, it is returned as is.
21
+
22
+ If :attr:`device` is ``None``, this will return the current default CUDA
23
+ device if :attr:`optional` is ``True``.
24
+ """
25
+ if isinstance(device, int):
26
+ return device
27
+ if isinstance(device, str):
28
+ device = torch.device(device)
29
+ if isinstance(device, torch.device):
30
+ if allow_cpu:
31
+ if device.type not in ["cuda", "cpu"]:
32
+ raise ValueError(f"Expected a cuda or cpu device, but got: {device}")
33
+ elif device.type != "cuda":
34
+ raise ValueError(f"Expected a cuda device, but got: {device}")
35
+ if not torch.jit.is_scripting():
36
+ if isinstance(device, torch.cuda.device):
37
+ return device.idx
38
+ return _torch_get_device_index(device, optional, allow_cpu)
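
Illustrative calls to the helper above, assuming a CUDA build; the result for None depends on the current device, and the device indices are placeholders.

    import torch
    from torch.cuda._utils import _get_device_index

    _get_device_index(torch.device("cuda", 1))               # -> 1
    _get_device_index("cuda:0")                              # -> 0
    _get_device_index(None, optional=True)                   # -> index of the current CUDA device
    _get_device_index(torch.device("cpu"), allow_cpu=True)   # -> -1
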
venv/lib/python3.10/site-packages/torch/cuda/comm.py ADDED
@@ -0,0 +1,18 @@
1
+ # The functions here have been moved to torch.nn.parallel.comm
2
+ from torch.nn.parallel.comm import (
3
+ broadcast,
4
+ broadcast_coalesced,
5
+ gather,
6
+ reduce_add,
7
+ reduce_add_coalesced,
8
+ scatter,
9
+ )
10
+
11
+ __all__ = [
12
+ "broadcast",
13
+ "broadcast_coalesced",
14
+ "reduce_add",
15
+ "reduce_add_coalesced",
16
+ "scatter",
17
+ "gather",
18
+ ]
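
A hypothetical two-GPU sketch of the re-exported helpers; the device ids are placeholders for whatever is actually available.

    import torch
    from torch.cuda import comm

    t = torch.arange(4, device="cuda:0", dtype=torch.float32)
    copies = comm.broadcast(t, devices=[0, 1])        # one copy of t on each listed device
    total = comm.reduce_add(copies, destination=0)    # elementwise sum gathered back on cuda:0
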
venv/lib/python3.10/site-packages/torch/cuda/error.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/cuda/graphs.py ADDED
@@ -0,0 +1,479 @@
1
+ import gc
2
+ from typing import Optional
3
+
4
+ import torch
5
+ from torch.utils import _pytree
6
+ from .._utils import _dummy_type
7
+
8
+ if not hasattr(torch._C, "_CudaStreamBase"):
9
+ # Define dummy base classes
10
+ torch._C.__dict__["_CUDAGraph"] = _dummy_type("_CUDAGraph")
11
+ torch._C.__dict__["_graph_pool_handle"] = _dummy_type("_graph_pool_handle")
12
+ torch._C.__dict__["_cuda_isCurrentStreamCapturing"] = _dummy_type(
13
+ "_cuda_isCurrentStreamCapturing"
14
+ )
15
+
16
+ from torch._C import ( # noqa: F401
17
+ _cuda_isCurrentStreamCapturing,
18
+ _CUDAGraph,
19
+ _graph_pool_handle,
20
+ )
21
+
22
+
23
+ def is_current_stream_capturing():
24
+ r"""Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise.
25
+
26
+ If a CUDA context does not exist on the current device, returns False without initializing the context.
27
+ """
28
+ return _cuda_isCurrentStreamCapturing()
29
+
30
+
31
+ # Python shim helps Sphinx process docstrings more reliably.
32
+ def graph_pool_handle():
33
+ r"""Return an opaque token representing the id of a graph memory pool.
34
+
35
+ See :ref:`Graph memory management<graph-memory-management>`.
36
+
37
+ .. warning::
38
+ This API is in beta and may change in future releases.
39
+ """
40
+ return _graph_pool_handle()
41
+
42
+
43
+ # Python shim helps Sphinx process docstrings more reliably.
44
+ class CUDAGraph(torch._C._CUDAGraph):
45
+ r"""Wrapper around a CUDA graph.
46
+
47
+ .. warning::
48
+ This API is in beta and may change in future releases.
49
+ """
50
+
51
+ def __new__(cls):
52
+ return super().__new__(cls)
53
+
54
+ def capture_begin(self, pool=None, capture_error_mode="global"):
55
+ r"""Begin capturing CUDA work on the current stream.
56
+
57
+ Typically, you shouldn't call ``capture_begin`` yourself.
58
+ Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
59
+ which call ``capture_begin`` internally.
60
+
61
+ Arguments:
62
+ pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or
63
+ :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory
64
+ with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`.
65
+ capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream.
66
+ Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc,
67
+ may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for
68
+ actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting
69
+ unless you're familiar with `cudaStreamCaptureMode <https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85>`_
70
+ """ # noqa: B950
71
+ super().capture_begin(pool=pool, capture_error_mode=capture_error_mode)
72
+
73
+ def capture_end(self):
74
+ r"""End CUDA graph capture on the current stream.
75
+
76
+ After ``capture_end``, ``replay`` may be called on this instance.
77
+
78
+ Typically, you shouldn't call ``capture_end`` yourself.
79
+ Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
80
+ which call ``capture_end`` internally.
81
+ """
82
+ super().capture_end()
83
+
84
+ def replay(self):
85
+ r"""Replay the CUDA work captured by this graph."""
86
+ super().replay()
87
+
88
+ def reset(self):
89
+ r"""Delete the graph currently held by this instance."""
90
+ super().reset()
91
+
92
+ def pool(self):
93
+ r"""Return an opaque token representing the id of this graph's memory pool.
94
+
95
+ This id can optionally be passed to another graph's ``capture_begin``,
96
+ which hints the other graph may share the same memory pool.
97
+ """
98
+ return super().pool()
99
+
100
+ def enable_debug_mode(self):
101
+ r"""Enable debugging mode for CUDAGraph.debug_dump."""
102
+ return super().enable_debug_mode()
103
+
104
+ def debug_dump(self, debug_path):
105
+ r"""
106
+ Arguments:
107
+ debug_path (required): Path to dump the graph to.
108
+
109
+ Calls a debugging function to dump the graph if the debugging is
110
+ enabled via CUDAGraph.enable_debug_mode()
111
+ """
112
+ return super().debug_dump(debug_path)
113
+
114
+
115
+ class graph:
116
+ r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay.
117
+
118
+ See :ref:`CUDA Graphs <cuda-graph-semantics>` for a general introduction,
119
+ detailed use, and constraints.
120
+
121
+ Arguments:
122
+ cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture.
123
+ pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or
124
+ :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) hinting this graph's capture
125
+ may share memory from the specified pool. See :ref:`Graph memory management<graph-memory-management>`.
126
+ stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context.
127
+ If not supplied, ``graph`` sets its own internal side stream as the current stream in the context.
128
+ capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream.
129
+ Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc,
130
+ may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for
131
+ actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting
132
+ unless you're familiar with `cudaStreamCaptureMode <https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85>`_
133
+
134
+ .. note::
135
+ For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture
136
+ used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture.
137
+
138
+ .. warning::
139
+ This API is in beta and may change in future releases.
140
+
141
+ .. _cudaStreamCaptureMode:
142
+ https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85
143
+ """ # noqa: B950
144
+
145
+ default_capture_stream: Optional["torch.cuda.Stream"] = None
146
+
147
+ def __init__(
148
+ self,
149
+ cuda_graph,
150
+ pool=None,
151
+ stream=None,
152
+ capture_error_mode: str = "global",
153
+ ):
154
+ # Lazy-init of default_capture_stream helps avoid circular-import errors.
155
+ # Not thread safe, but graphs already have the general (explicitly documented)
156
+ # restriction that only one capture may be underway at a time in the process.
157
+ if self.__class__.default_capture_stream is None:
158
+ self.__class__.default_capture_stream = torch.cuda.Stream()
159
+
160
+ self.pool = () if pool is None else (pool,)
161
+ self.capture_stream = (
162
+ stream if stream is not None else self.__class__.default_capture_stream
163
+ )
164
+ assert self.capture_stream is not None
165
+ self.stream_ctx = torch.cuda.stream(self.capture_stream)
166
+ self.cuda_graph = cuda_graph
167
+ self.capture_error_mode = capture_error_mode
168
+
169
+ def __enter__(self):
170
+ # Free as much memory as we can for the graph
171
+ torch.cuda.synchronize()
172
+ gc.collect()
173
+ torch.cuda.empty_cache()
174
+
175
+ # Stackoverflow seems comfortable with this pattern
176
+ # https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487
177
+ self.stream_ctx.__enter__()
178
+
179
+ self.cuda_graph.capture_begin(
180
+ *self.pool, capture_error_mode=self.capture_error_mode
181
+ )
182
+
183
+ def __exit__(self, exc_type, exc_value, traceback):
184
+ self.cuda_graph.capture_end()
185
+ self.stream_ctx.__exit__(exc_type, exc_value, traceback)
186
+ # returning None should propagate exceptions from either capture_end or stream_ctx.__exit__()
187
+
188
+
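
A hedged capture/replay sketch using the context manager above; the side-stream warmup and the static input/output tensors follow the pattern described in the class docstring, with toy shapes.

    import torch

    static_input = torch.empty(5, device="cuda")

    # Warm up on a side stream so lazy initialization stays out of the capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(3):
            static_output = static_input * 2
    torch.cuda.current_stream().wait_stream(s)

    g = torch.cuda.CUDAGraph()
    with torch.cuda.graph(g):
        static_output = static_input * 2

    static_input.copy_(torch.full((5,), 3.0, device="cuda"))
    g.replay()   # static_output now holds the result for the refreshed input
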
189
+ def make_graphed_callables(
190
+ callables, sample_args, num_warmup_iters=3, allow_unused_input=False, pool=None
191
+ ):
192
+ r"""Accept callables (functions or :class:`nn.Module<torch.nn.Module>`\ s) and returns graphed versions.
193
+
194
+ Each graphed callable's forward pass runs its source callable's
195
+ forward CUDA work as a CUDA graph inside a single autograd node.
196
+
197
+ The graphed callable's forward pass also appends
198
+ a backward node to the autograd graph. During backward, this node runs the
199
+ callable's backward work as a CUDA graph.
200
+
201
+ Therefore, each graphed callable should be a drop-in replacement for its source callable
202
+ in an autograd-enabled training loop.
203
+
204
+ See :ref:`Partial-network capture<partial-network-capture>` for detailed use and constraints.
205
+
206
+ If you pass a tuple of several callables, their captures will use the same memory pool.
207
+ See :ref:`Graph memory management<graph-memory-management>` for when this is appropriate.
208
+
209
+ Arguments:
210
+ callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph.
211
+ See :ref:`Graph memory management<graph-memory-management>` for when passing a tuple of callables
212
+ is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order
213
+ they'll run in the live workload.
214
+ sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable.
215
+ If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors.
216
+ If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors.
217
+ num_warmup_iters (int): The number of warmup iterations. Currently, ``DistributedDataParallel`` needs
218
+ 11 iterations for warm up. Default: ``3``.
219
+ allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs
220
+ (and therefore their grad is always zero) is an error. Defaults to False.
221
+ pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or
222
+ :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory
223
+ with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`.
224
+ .. note::
225
+ The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state
226
+ that's expected for the corresponding real input in the training loop.
227
+
228
+ .. warning::
229
+ This API is in beta and may change in future releases.
230
+
231
+ .. warning::
232
+ ``sample_args`` for each callable must contain only Tensors. Other types are not allowed.
233
+
234
+ .. warning::
235
+ Returned callables do not support higher order differentiation (e.g., double backward).
236
+
237
+ .. warning::
238
+ In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters
239
+ may be trainable. Buffers must have ``requires_grad=False``.
240
+
241
+ .. warning::
242
+ After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`,
243
+ you may not add or remove any of that Module's parameters or buffers.
244
+
245
+ .. warning::
246
+ :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks
247
+ registered on them at the time they are passed. However, registering hooks on modules *after* passing them
248
+ through :func:`~torch.cuda.make_graphed_callables` is allowed.
249
+
250
+ .. warning::
251
+ When running a graphed callable, you must pass its arguments in the same order and format
252
+ they appeared in that callable's ``sample_args``.
253
+
254
+ .. warning::
255
+ Automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with caching
256
+ disabled. The context manager ``torch.cuda.amp.autocast()`` must be used with ``cache_enabled=False``.
257
+ """
258
+ if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled():
259
+ raise RuntimeError(
260
+ "make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`."
261
+ )
262
+
263
+ just_one_callable = False
264
+
265
+ if not isinstance(callables, tuple):
266
+ just_one_callable = True
267
+ callables = (callables,)
268
+ sample_args = (sample_args,)
269
+
270
+ flatten_sample_args = []
271
+
272
+ for c, args in zip(callables, sample_args):
273
+ if isinstance(c, torch.nn.Module):
274
+ assert (
275
+ len(c._backward_hooks) == 0
276
+ and len(c._forward_hooks) == 0
277
+ and len(c._forward_pre_hooks) == 0
278
+ ), (
279
+ "Modules must not have hooks registered at the time they are passed. However, registering hooks "
280
+ + "on modules after passing them through make_graphed_callables is allowed."
281
+ )
282
+ assert all(b.requires_grad is False for b in c.buffers()), (
283
+ "In any :class:`~torch.nn.Module` passed to "
284
+ + ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have "
285
+ + "``requires_grad=False``."
286
+ )
287
+ flatten_arg = _pytree.arg_tree_leaves(*args)
288
+ flatten_sample_args.append(tuple(flatten_arg))
289
+ assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), (
290
+ "In the beta API, sample_args "
291
+ + "for each callable must contain only Tensors. Other types are not allowed."
292
+ )
293
+
294
+ # If a callable is an nn.Module, its graph's full input surface is the args the user explicitly
295
+ # passes to forward (ie, its sample_args) AND the module's parameter attributes.
296
+ per_callable_len_user_args = [len(args) for args in flatten_sample_args]
297
+ per_callable_module_params = [
298
+ tuple(c.parameters()) if isinstance(c, torch.nn.Module) else ()
299
+ for c in callables
300
+ ]
301
+ per_callable_static_input_surfaces = [
302
+ flatten_sample_args[i] + per_callable_module_params[i]
303
+ for i in range(len(callables))
304
+ ]
305
+
306
+ fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
307
+ bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
308
+
309
+ mempool = graph_pool_handle() if pool is None else pool
310
+
311
+ # Warmup
312
+ # Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work
313
+ # from ending up in any captures.
314
+ torch.cuda.synchronize()
315
+ with torch.cuda.stream(torch.cuda.Stream()):
316
+ for func, args, static_input_surface in zip(
317
+ callables, sample_args, per_callable_static_input_surfaces
318
+ ):
319
+ for _ in range(num_warmup_iters):
320
+ outputs = _pytree.tree_leaves(func(*args))
321
+ grad_inputs = torch.autograd.grad(
322
+ outputs=tuple(o for o in outputs if o.requires_grad),
323
+ inputs=tuple(i for i in static_input_surface if i.requires_grad),
324
+ grad_outputs=tuple(
325
+ torch.empty_like(o) for o in outputs if o.requires_grad
326
+ ),
327
+ only_inputs=True,
328
+ allow_unused=allow_unused_input,
329
+ )
330
+ del outputs, grad_inputs # type: ignore[possibly-undefined]
331
+ torch.cuda.synchronize()
332
+
333
+ # All captures here share a mempool. To avoid replays corrupting each other's memory,
334
+ # the safest approach is to capture all passes in the same order they'll run:
335
+ # fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1.
336
+
337
+ # Capture forward graphs
338
+ per_callable_static_outputs = []
339
+ per_callable_output_unflatten_spec = []
340
+ for func, args, fwd_graph in zip(callables, sample_args, fwd_graphs):
341
+ with torch.cuda.graph(fwd_graph, pool=mempool):
342
+ outputs = func(*args)
343
+
344
+ flatten_outputs, spec = _pytree.tree_flatten(outputs)
345
+ per_callable_static_outputs.append(tuple(flatten_outputs))
346
+ per_callable_output_unflatten_spec.append(spec)
347
+
348
+ # Capture backward graphs in reverse order
349
+ per_callable_static_grad_outputs = []
350
+ per_callable_static_grad_inputs = []
351
+ for static_input_surface, static_outputs, bwd_graph, module_params in zip(
352
+ reversed(per_callable_static_input_surfaces),
353
+ reversed(per_callable_static_outputs),
354
+ reversed(bwd_graphs),
355
+ reversed(per_callable_module_params),
356
+ ):
357
+ # For now, assumes all static_outputs require grad
358
+ # assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad."
359
+ static_grad_outputs = tuple(
360
+ torch.empty_like(o) if o.requires_grad else None for o in static_outputs
361
+ )
362
+
363
+ with torch.cuda.graph(bwd_graph, pool=mempool):
364
+ grad_inputs = torch.autograd.grad(
365
+ outputs=tuple(o for o in static_outputs if o.requires_grad),
366
+ inputs=tuple(i for i in static_input_surface if i.requires_grad),
367
+ grad_outputs=tuple(o for o in static_grad_outputs if o is not None),
368
+ only_inputs=True,
369
+ allow_unused=allow_unused_input,
370
+ )
371
+
372
+ # Constructs a tuple suitable for returning from Graphed.backward:
373
+ # Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad.
374
+ # I couldn't think of a slick one-liner for this pattern.
375
+ static_grad_inputs = []
376
+ grad_idx = 0
377
+ for arg in static_input_surface:
378
+ if arg.requires_grad:
379
+ static_grad_inputs.append(grad_inputs[grad_idx])
380
+ grad_idx += 1
381
+ else:
382
+ static_grad_inputs.append(None) # type: ignore[arg-type]
383
+ static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment]
384
+
385
+ per_callable_static_grad_outputs.append(static_grad_outputs)
386
+ per_callable_static_grad_inputs.append(static_grad_inputs)
387
+
388
+ # Reverses the most recent two lists
389
+ per_callable_static_grad_outputs.reverse()
390
+ per_callable_static_grad_inputs.reverse()
391
+ # Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable.
392
+
393
+ def make_graphed_autograd_function(
394
+ fwd_graph,
395
+ bwd_graph,
396
+ module_params,
397
+ len_user_args,
398
+ output_unflatten_spec,
399
+ static_input_surface,
400
+ static_outputs,
401
+ static_grad_outputs,
402
+ static_grad_inputs,
403
+ ):
404
+ class Graphed(torch.autograd.Function):
405
+ @staticmethod
406
+ def forward(ctx, *inputs):
407
+ # At this stage, only the user args may (potentially) be new tensors.
408
+ for i in range(len_user_args):
409
+ if static_input_surface[i].data_ptr() != inputs[i].data_ptr():
410
+ static_input_surface[i].copy_(inputs[i])
411
+ fwd_graph.replay()
412
+ assert isinstance(static_outputs, tuple)
413
+ return tuple(o.detach() for o in static_outputs)
414
+
415
+ @staticmethod
416
+ @torch.autograd.function.once_differentiable
417
+ def backward(ctx, *grads):
418
+ assert len(grads) == len(static_grad_outputs)
419
+ for g, grad in zip(static_grad_outputs, grads):
420
+ if g is not None:
421
+ # don't copy if autograd gods have been kind and the
422
+ # incoming grad is already in the right place
423
+ if g.data_ptr() != grad.data_ptr():
424
+ g.copy_(grad)
425
+ bwd_graph.replay()
426
+
427
+ # Input args that didn't require grad expect a None gradient.
428
+ assert isinstance(static_grad_inputs, tuple)
429
+ return tuple(
430
+ b.detach() if b is not None else b for b in static_grad_inputs
431
+ )
432
+
433
+ def functionalized(*user_args):
434
+ # Runs the autograd function with inputs == all inputs to the graph that might require grad
435
+ # (explicit user args + module parameters)
436
+ # Assumes module params didn't change since capture.
437
+ flatten_user_args = _pytree.arg_tree_leaves(*user_args)
438
+ out = Graphed.apply(*(tuple(flatten_user_args) + module_params))
439
+ return _pytree.tree_unflatten(out, output_unflatten_spec)
440
+
441
+ return functionalized
442
+
443
+ # Put together the final graphed callables
444
+ ret = []
445
+ for i, func in enumerate(callables):
446
+ graphed = make_graphed_autograd_function(
447
+ fwd_graphs[i],
448
+ bwd_graphs[i],
449
+ per_callable_module_params[i],
450
+ per_callable_len_user_args[i],
451
+ per_callable_output_unflatten_spec[i],
452
+ per_callable_static_input_surfaces[i],
453
+ per_callable_static_outputs[i],
454
+ per_callable_static_grad_outputs[i],
455
+ per_callable_static_grad_inputs[i],
456
+ )
457
+
458
+ if isinstance(func, torch.nn.Module):
459
+
460
+ def make_graphed_forward(func, graph_training_state, graphed, orig_fwd):
461
+ def new_fwd(*user_args):
462
+ # If the module's training-or-eval state matches what we graphed,
463
+ # run the graph, otherwise run the original forward method
464
+ if func.training == graph_training_state:
465
+ return graphed(*user_args)
466
+ else:
467
+ return orig_fwd(*user_args)
468
+
469
+ return new_fwd
470
+
471
+ func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment]
472
+ ret.append(func)
473
+ else:
474
+ ret.append(graphed)
475
+
476
+ if just_one_callable:
477
+ return ret[0]
478
+
479
+ return tuple(ret)
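
And a short sketch of the nn.Module path through make_graphed_callables; the layer and sizes are placeholders, and the sample argument must be a CUDA tensor whose requires_grad matches real use.

    import torch

    module = torch.nn.Linear(16, 16).cuda()
    sample = torch.randn(4, 16, device="cuda", requires_grad=True)

    graphed = torch.cuda.make_graphed_callables(module, (sample,))

    x = torch.randn(4, 16, device="cuda", requires_grad=True)
    y = graphed(x)        # forward replays the captured forward graph
    y.sum().backward()    # backward replays the captured backward graph
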
venv/lib/python3.10/site-packages/torch/cuda/nccl.py ADDED
@@ -0,0 +1,137 @@
1
+ import collections
2
+ import warnings
3
+ from typing import Optional, Sequence, Union
4
+
5
+ import torch.cuda
6
+
7
+
8
+ __all__ = ["all_reduce", "reduce", "broadcast", "all_gather", "reduce_scatter"]
9
+
10
+ SUM = 0 # ncclRedOp_t
11
+
12
+
13
+ def is_available(tensors):
14
+ if not hasattr(torch._C, "_nccl_all_reduce"):
15
+ warnings.warn("PyTorch is not compiled with NCCL support")
16
+ return False
17
+
18
+ devices = set()
19
+ for tensor in tensors:
20
+ if tensor.is_sparse:
21
+ return False
22
+ if not tensor.is_contiguous():
23
+ return False
24
+ if not tensor.is_cuda:
25
+ return False
26
+ device = tensor.get_device()
27
+ if device in devices:
28
+ return False
29
+ devices.add(device)
30
+
31
+ return True
32
+
33
+
34
+ def version():
35
+ ver = torch._C._nccl_version()
36
+ major = ver >> 32
37
+ minor = (ver >> 16) & 65535
38
+ patch = ver & 65535
39
+ suffix = torch._C._nccl_version_suffix().decode("utf-8")
40
+ if suffix == "":
41
+ return (major, minor, patch)
42
+ else:
43
+ return (major, minor, patch, suffix)
44
+
45
+
46
+ def unique_id():
47
+ return torch._C._nccl_unique_id()
48
+
49
+
50
+ def init_rank(num_ranks, uid, rank):
51
+ return torch._C._nccl_init_rank(num_ranks, uid, rank)
52
+
53
+
54
+ def _check_sequence_type(inputs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> None:
55
+ if not isinstance(inputs, collections.abc.Container) or isinstance(
56
+ inputs, torch.Tensor
57
+ ):
58
+ raise TypeError("Inputs should be a collection of tensors")
59
+
60
+
61
+ def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
62
+ _check_sequence_type(inputs)
63
+ if outputs is None:
64
+ outputs = inputs
65
+ _check_sequence_type(outputs)
66
+ torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
67
+
68
+
69
+ # `output` used to be `outputs`, taking in a list of tensors. So we have two
70
+ # arguments for BC reasons.
71
+ def reduce(
72
+ inputs: Sequence[torch.Tensor],
73
+ output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None,
74
+ root: int = 0,
75
+ op: int = SUM,
76
+ streams: Optional[Sequence[torch.cuda.Stream]] = None,
77
+ comms=None,
78
+ *,
79
+ outputs: Optional[Sequence[torch.Tensor]] = None,
80
+ ) -> None:
81
+ _check_sequence_type(inputs)
82
+ _output: torch.Tensor
83
+ if outputs is not None:
84
+ if output is not None:
85
+ raise ValueError(
86
+ "'output' and 'outputs' can not be both specified. 'outputs' is deprecated in "
87
+ "favor of 'output', taking in a single output tensor. The signature of reduce is: "
88
+ "reduce(inputs, output=None, root=0, op=SUM, streams=None, comms=None)."
89
+ )
90
+ else:
91
+ warnings.warn(
92
+ "nccl.reduce with an output tensor list is deprecated. "
93
+ "Please specify a single output tensor with argument 'output' instead instead."
94
+ )
95
+ _output = outputs[root]
96
+ elif not isinstance(output, torch.Tensor) and isinstance(
97
+ output, collections.abc.Sequence
98
+ ):
99
+ # User called old API with positional arguments of list of output tensors.
100
+ warnings.warn(
101
+ "nccl.reduce with an output tensor list is deprecated. "
102
+ "Please specify a single output tensor."
103
+ )
104
+ _output = output[root]
105
+ else:
106
+ _output = inputs[root] if output is None else output
107
+ torch._C._nccl_reduce(inputs, _output, root, op, streams, comms)
108
+
109
+
110
+ def broadcast(
111
+ inputs: Sequence[torch.Tensor], root: int = 0, streams=None, comms=None
112
+ ) -> None:
113
+ _check_sequence_type(inputs)
114
+ torch._C._nccl_broadcast(inputs, root, streams, comms)
115
+
116
+
117
+ def all_gather(
118
+ inputs: Sequence[torch.Tensor],
119
+ outputs: Sequence[torch.Tensor],
120
+ streams=None,
121
+ comms=None,
122
+ ) -> None:
123
+ _check_sequence_type(inputs)
124
+ _check_sequence_type(outputs)
125
+ torch._C._nccl_all_gather(inputs, outputs, streams, comms)
126
+
127
+
128
+ def reduce_scatter(
129
+ inputs: Sequence[torch.Tensor],
130
+ outputs: Sequence[torch.Tensor],
131
+ op: int = SUM,
132
+ streams=None,
133
+ comms=None,
134
+ ) -> None:
135
+ _check_sequence_type(inputs)
136
+ _check_sequence_type(outputs)
137
+ torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms)
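
An illustrative single-process, two-GPU sketch of the raw wrappers above; most code should instead go through torch.distributed, which manages NCCL communicators itself.

    import torch
    import torch.cuda.nccl as nccl

    tensors = [torch.ones(8, device=f"cuda:{i}") for i in range(2)]
    if nccl.is_available(tensors):
        nccl.all_reduce(tensors)   # in place: each tensor now holds the elementwise sum
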
venv/lib/python3.10/site-packages/torch/cuda/nvtx.py ADDED
@@ -0,0 +1,91 @@
1
+ r"""This package adds support for NVIDIA Tools Extension (NVTX) used in profiling."""
2
+
3
+ from contextlib import contextmanager
4
+
5
+ try:
6
+ from torch._C import _nvtx
7
+ except ImportError:
8
+
9
+ class _NVTXStub:
10
+ @staticmethod
11
+ def _fail(*args, **kwargs):
12
+ raise RuntimeError(
13
+ "NVTX functions not installed. Are you sure you have a CUDA build?"
14
+ )
15
+
16
+ rangePushA = _fail
17
+ rangePop = _fail
18
+ markA = _fail
19
+
20
+ _nvtx = _NVTXStub() # type: ignore[assignment]
21
+
22
+ __all__ = ["range_push", "range_pop", "range_start", "range_end", "mark", "range"]
23
+
24
+
25
+ def range_push(msg):
26
+ """
27
+ Push a range onto a stack of nested range spans. Returns the zero-based depth of the range that is started.
28
+
29
+ Args:
30
+ msg (str): ASCII message to associate with range
31
+ """
32
+ return _nvtx.rangePushA(msg)
33
+
34
+
35
+ def range_pop():
36
+ """Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended."""
37
+ return _nvtx.rangePop()
38
+
39
+
40
+ def range_start(msg) -> int:
41
+ """
42
+ Mark the start of a range with a string message. It returns a unique handle
43
+ for this range to pass to the corresponding call to range_end().
44
+
45
+ A key difference between this and range_push/range_pop is that the
46
+ range_start/range_end version supports range across threads (start on one
47
+ thread and end on another thread).
48
+
49
+ Returns: A range handle (uint64_t) that can be passed to range_end().
50
+
51
+ Args:
52
+ msg (str): ASCII message to associate with the range.
53
+ """
54
+ return _nvtx.rangeStartA(msg)
55
+
56
+
57
+ def range_end(range_id) -> None:
58
+ """
59
+ Mark the end of a range for a given range_id.
60
+
61
+ Args:
62
+ range_id (int): a unique handle for the start range.
63
+ """
64
+ _nvtx.rangeEnd(range_id)
65
+
66
+
67
+ def mark(msg):
68
+ """
69
+ Describe an instantaneous event that occurred at some point.
70
+
71
+ Args:
72
+ msg (str): ASCII message to associate with the event.
73
+ """
74
+ return _nvtx.markA(msg)
75
+
76
+
77
+ @contextmanager
78
+ def range(msg, *args, **kwargs):
79
+ """
80
+ Context manager / decorator that pushes an NVTX range at the beginning
81
+ of its scope, and pops it at the end. If extra arguments are given,
82
+ they are passed as arguments to msg.format().
83
+
84
+ Args:
85
+ msg (str): message to associate with the range
86
+ """
87
+ range_push(msg.format(*args, **kwargs))
88
+ try:
89
+ yield
90
+ finally:
91
+ range_pop()
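
A small sketch of annotating a region with the helpers above; the ranges only become visible when the program runs under an NVTX-aware profiler such as Nsight Systems.

    import torch
    import torch.cuda.nvtx as nvtx

    nvtx.mark("start of step")
    with nvtx.range("forward pass, batch {}", 0):
        x = torch.randn(1024, 1024, device="cuda")
        y = x @ x   # kernels launched here are grouped under the named range
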
venv/lib/python3.10/site-packages/torch/cuda/random.py ADDED
@@ -0,0 +1,179 @@
1
+ from typing import Iterable, List, Union
2
+
3
+ import torch
4
+ from .. import Tensor
5
+ from . import _lazy_call, _lazy_init, current_device, device_count
6
+
7
+ __all__ = [
8
+ "get_rng_state",
9
+ "get_rng_state_all",
10
+ "set_rng_state",
11
+ "set_rng_state_all",
12
+ "manual_seed",
13
+ "manual_seed_all",
14
+ "seed",
15
+ "seed_all",
16
+ "initial_seed",
17
+ ]
18
+
19
+
20
+ def get_rng_state(device: Union[int, str, torch.device] = "cuda") -> Tensor:
21
+ r"""Return the random number generator state of the specified GPU as a ByteTensor.
22
+
23
+ Args:
24
+ device (torch.device or int, optional): The device to return the RNG state of.
25
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
26
+
27
+ .. warning::
28
+ This function eagerly initializes CUDA.
29
+ """
30
+ _lazy_init()
31
+ if isinstance(device, str):
32
+ device = torch.device(device)
33
+ elif isinstance(device, int):
34
+ device = torch.device("cuda", device)
35
+ idx = device.index
36
+ if idx is None:
37
+ idx = current_device()
38
+ default_generator = torch.cuda.default_generators[idx]
39
+ return default_generator.get_state()
40
+
41
+
42
+ def get_rng_state_all() -> List[Tensor]:
43
+ r"""Return a list of ByteTensor representing the random number states of all devices."""
44
+ results = []
45
+ for i in range(device_count()):
46
+ results.append(get_rng_state(i))
47
+ return results
48
+
49
+
50
+ def set_rng_state(
51
+ new_state: Tensor, device: Union[int, str, torch.device] = "cuda"
52
+ ) -> None:
53
+ r"""Set the random number generator state of the specified GPU.
54
+
55
+ Args:
56
+ new_state (torch.ByteTensor): The desired state
57
+ device (torch.device or int, optional): The device to set the RNG state.
58
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
59
+ """
60
+ with torch._C._DisableFuncTorch():
61
+ new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
62
+ if isinstance(device, str):
63
+ device = torch.device(device)
64
+ elif isinstance(device, int):
65
+ device = torch.device("cuda", device)
66
+
67
+ def cb():
68
+ idx = device.index
69
+ if idx is None:
70
+ idx = current_device()
71
+ default_generator = torch.cuda.default_generators[idx]
72
+ default_generator.set_state(new_state_copy)
73
+
74
+ _lazy_call(cb)
75
+
76
+
77
+ def set_rng_state_all(new_states: Iterable[Tensor]) -> None:
78
+ r"""Set the random number generator state of all devices.
79
+
80
+ Args:
81
+ new_states (Iterable of torch.ByteTensor): The desired state for each device.
82
+ """
83
+ for i, state in enumerate(new_states):
84
+ set_rng_state(state, i)
85
+
86
+
87
+ def manual_seed(seed: int) -> None:
88
+ r"""Set the seed for generating random numbers for the current GPU.
89
+
90
+ It's safe to call this function if CUDA is not available; in that
91
+ case, it is silently ignored.
92
+
93
+ Args:
94
+ seed (int): The desired seed.
95
+
96
+ .. warning::
97
+ If you are working with a multi-GPU model, this function is insufficient
98
+ to get determinism. To seed all GPUs, use :func:`manual_seed_all`.
99
+ """
100
+ seed = int(seed)
101
+
102
+ def cb():
103
+ idx = current_device()
104
+ default_generator = torch.cuda.default_generators[idx]
105
+ default_generator.manual_seed(seed)
106
+
107
+ _lazy_call(cb, seed=True)
108
+
109
+
110
+ def manual_seed_all(seed: int) -> None:
111
+ r"""Set the seed for generating random numbers on all GPUs.
112
+
113
+ It's safe to call this function if CUDA is not available; in that
114
+ case, it is silently ignored.
115
+
116
+ Args:
117
+ seed (int): The desired seed.
118
+ """
119
+ seed = int(seed)
120
+
121
+ def cb():
122
+ for i in range(device_count()):
123
+ default_generator = torch.cuda.default_generators[i]
124
+ default_generator.manual_seed(seed)
125
+
126
+ _lazy_call(cb, seed_all=True)
127
+
128
+
129
+ def seed() -> None:
130
+ r"""Set the seed for generating random numbers to a random number for the current GPU.
131
+
132
+ It's safe to call this function if CUDA is not available; in that
133
+ case, it is silently ignored.
134
+
135
+ .. warning::
136
+ If you are working with a multi-GPU model, this function will only initialize
137
+ the seed on one GPU. To initialize all GPUs, use :func:`seed_all`.
138
+ """
139
+
140
+ def cb():
141
+ idx = current_device()
142
+ default_generator = torch.cuda.default_generators[idx]
143
+ default_generator.seed()
144
+
145
+ _lazy_call(cb)
146
+
147
+
148
+ def seed_all() -> None:
149
+ r"""Set the seed for generating random numbers to a random number on all GPUs.
150
+
151
+ It's safe to call this function if CUDA is not available; in that
152
+ case, it is silently ignored.
153
+ """
154
+
155
+ def cb():
156
+ random_seed = 0
157
+ seeded = False
158
+ for i in range(device_count()):
159
+ default_generator = torch.cuda.default_generators[i]
160
+ if not seeded:
161
+ default_generator.seed()
162
+ random_seed = default_generator.initial_seed()
163
+ seeded = True
164
+ else:
165
+ default_generator.manual_seed(random_seed)
166
+
167
+ _lazy_call(cb)
168
+
169
+
170
+ def initial_seed() -> int:
171
+ r"""Return the current random seed of the current GPU.
172
+
173
+ .. warning::
174
+ This function eagerly initializes CUDA.
175
+ """
176
+ _lazy_init()
177
+ idx = current_device()
178
+ default_generator = torch.cuda.default_generators[idx]
179
+ return default_generator.initial_seed()
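
A brief sketch of checkpointing CUDA RNG state with the helpers above, for example around a block that should not perturb global randomness.

    import torch

    states = torch.cuda.get_rng_state_all()      # one ByteTensor per visible device

    torch.cuda.manual_seed_all(1234)             # temporary, deterministic seeding
    noise = torch.randn(3, device="cuda")

    torch.cuda.set_rng_state_all(states)         # restore the original per-device states
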
venv/lib/python3.10/site-packages/torch/cuda/sparse.py ADDED
@@ -0,0 +1 @@
1
+ # The Tensor classes are added to this module by python_tensor.cpp
venv/lib/python3.10/site-packages/torch/cuda/streams.py ADDED
@@ -0,0 +1,241 @@
1
+ import ctypes
2
+
3
+ import torch
4
+ from torch._streambase import _EventBase, _StreamBase
5
+ from .._utils import _dummy_type
6
+
7
+
8
+ if not hasattr(torch._C, "_CudaStreamBase"):
9
+ # Define dummy base classes
10
+ torch._C.__dict__["_CudaStreamBase"] = _dummy_type("_CudaStreamBase")
11
+ torch._C.__dict__["_CudaEventBase"] = _dummy_type("_CudaEventBase")
12
+
13
+
14
+ class Stream(torch._C._CudaStreamBase, _StreamBase):
15
+ r"""Wrapper around a CUDA stream.
16
+
17
+ A CUDA stream is a linear sequence of execution that belongs to a specific
18
+ device, independent from other streams. See :ref:`cuda-semantics` for
19
+ details.
20
+
21
+ Args:
22
+ device(torch.device or int, optional): a device on which to allocate
23
+ the stream. If :attr:`device` is ``None`` (default) or a negative
24
+ integer, this will use the current device.
25
+ priority(int, optional): priority of the stream, should be 0 or
26
+ negative, where negative numbers indicate higher priority. By default,
27
+ streams have priority 0.
28
+
29
+ """
30
+
31
+ def __new__(cls, device=None, priority=0, **kwargs):
32
+ # setting device manager is expensive, so we avoid it unless necessary
33
+ if device is None or ("stream_id" in kwargs and "device_index" in kwargs):
34
+ return super().__new__(cls, priority=priority, **kwargs)
35
+ else:
36
+ with torch.cuda.device(device):
37
+ return super().__new__(cls, priority=priority, **kwargs)
38
+
39
+ def wait_event(self, event):
40
+ r"""Make all future work submitted to the stream wait for an event.
41
+
42
+ Args:
43
+ event (torch.cuda.Event): an event to wait for.
44
+
45
+ .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
46
+ `CUDA Stream documentation`_ for more info.
47
+
48
+ This function returns without waiting for :attr:`event`: only future
49
+ operations are affected.
50
+
51
+ .. _CUDA Stream documentation:
52
+ https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
53
+ """
54
+ event.wait(self)
55
+
56
+ def wait_stream(self, stream):
57
+ r"""Synchronize with another stream.
58
+
59
+ All future work submitted to this stream will wait until all kernels
60
+ submitted to a given stream at the time of call complete.
61
+
62
+ Args:
63
+ stream (Stream): a stream to synchronize.
64
+
65
+ .. note:: This function returns without waiting for currently enqueued
66
+ kernels in :attr:`stream`: only future operations are affected.
67
+ """
68
+ self.wait_event(stream.record_event())
69
+
70
+ def record_event(self, event=None):
71
+ r"""Record an event.
72
+
73
+ Args:
74
+ event (torch.cuda.Event, optional): event to record. If not given, a new one
75
+ will be allocated.
76
+
77
+ Returns:
78
+ Recorded event.
79
+ """
80
+ if event is None:
81
+ event = Event()
82
+ event.record(self)
83
+ return event
84
+
85
+ def query(self):
86
+ r"""Check if all the work submitted has been completed.
87
+
88
+ Returns:
89
+ A boolean indicating if all kernels in this stream are completed.
90
+ """
91
+ return super().query()
92
+
93
+ def synchronize(self):
94
+ r"""Wait for all the kernels in this stream to complete.
95
+
96
+ .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
97
+ `CUDA Stream documentation`_ for more info.
98
+ """
99
+ super().synchronize()
100
+
101
+ @property
102
+ def _as_parameter_(self):
103
+ return ctypes.c_void_p(self.cuda_stream)
104
+
105
+ def __eq__(self, o):
106
+ if isinstance(o, Stream):
107
+ return super().__eq__(o)
108
+ return False
109
+
110
+ def __hash__(self):
111
+ return hash((self.cuda_stream, self.device))
112
+
113
+ def __repr__(self):
114
+ return f"<torch.cuda.Stream device={self.device} cuda_stream={self.cuda_stream:#x}>"
115
+
116
+
117
+ class ExternalStream(Stream):
118
+ r"""Wrapper around an externally allocated CUDA stream.
119
+
120
+ This class is used to wrap streams allocated in other libraries in order
121
+ to facilitate data exchange and multi-library interactions.
122
+
123
+ .. note:: This class doesn't manage the stream life-cycle; it is the user's
124
+ responsibility to keep the referenced stream alive while this class is
125
+ being used.
126
+
127
+ Args:
128
+ stream_ptr(int): Integer representation of the ``cudaStream_t`` value
129
+ that was allocated externally.
130
+ device(torch.device or int, optional): the device where the stream
131
+ was originally allocated. If the device is specified incorrectly,
132
+ subsequent launches using this stream may fail.
133
+ """
134
+
135
+ def __new__(cls, stream_ptr, device=None, **kwargs):
136
+ with torch.cuda.device(device):
137
+ return super().__new__(cls, stream_ptr=stream_ptr, **kwargs)
138
+
139
+
140
+ class Event(torch._C._CudaEventBase, _EventBase):
141
+ r"""Wrapper around a CUDA event.
142
+
143
+ CUDA events are synchronization markers that can be used to monitor the
144
+ device's progress, to accurately measure timing, and to synchronize CUDA
145
+ streams.
146
+
147
+ The underlying CUDA events are lazily initialized when the event is first
148
+ recorded or exported to another process. After creation, only streams on the
149
+ same device may record the event. However, streams on any device can wait on
150
+ the event.
151
+
152
+ Args:
153
+ enable_timing (bool, optional): indicates if the event should measure time
154
+ (default: ``False``)
155
+ blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``)
156
+ interprocess (bool): if ``True``, the event can be shared between processes
157
+ (default: ``False``)
158
+
159
+ .. _CUDA Event Documentation:
160
+ https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
161
+ """
162
+
163
+ def __new__(cls, enable_timing=False, blocking=False, interprocess=False):
164
+ return super().__new__(
165
+ cls,
166
+ enable_timing=enable_timing,
167
+ blocking=blocking,
168
+ interprocess=interprocess,
169
+ )
170
+
171
+ @classmethod
172
+ def from_ipc_handle(cls, device, handle):
173
+ r"""Reconstruct an event from an IPC handle on the given device."""
174
+ return super().from_ipc_handle(device, handle)
175
+
176
+ def record(self, stream=None):
177
+ r"""Record the event in a given stream.
178
+
179
+ Uses ``torch.cuda.current_stream()`` if no stream is specified. The
180
+ stream's device must match the event's device.
181
+ """
182
+ if stream is None:
183
+ stream = torch.cuda.current_stream()
184
+ super().record(stream)
185
+
186
+ def wait(self, stream=None):
187
+ r"""Make all future work submitted to the given stream wait for this event.
188
+
189
+ Uses ``torch.cuda.current_stream()`` if no stream is specified.
190
+
191
+ .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
192
+ `CUDA Event documentation`_ for more info.
193
+ """
194
+ if stream is None:
195
+ stream = torch.cuda.current_stream()
196
+ super().wait(stream)
197
+
198
+ def query(self):
199
+ r"""Check if all work currently captured by event has completed.
200
+
201
+ Returns:
202
+ A boolean indicating if all work currently captured by event has
203
+ completed.
204
+ """
205
+ return super().query()
206
+
207
+ def elapsed_time(self, end_event):
208
+ r"""Return the time elapsed.
209
+
210
+ Time reported in milliseconds after the event was recorded and
211
+ before the end_event was recorded.
212
+ """
213
+ return super().elapsed_time(end_event)
214
+
215
+ def synchronize(self):
216
+ r"""Wait for the event to complete.
217
+
218
+ Waits until the completion of all work currently captured in this event.
219
+ This prevents the CPU thread from proceeding until the event completes.
220
+
221
+ .. note:: This is a wrapper around ``cudaEventSynchronize()``: see
222
+ `CUDA Event documentation`_ for more info.
223
+ """
224
+ super().synchronize()
225
+
226
+ def ipc_handle(self):
227
+ r"""Return an IPC handle of this event.
228
+
229
+ If not recorded yet, the event will use the current device.
230
+ """
231
+ return super().ipc_handle()
232
+
233
+ @property
234
+ def _as_parameter_(self):
235
+ return ctypes.c_void_p(self.cuda_event)
236
+
237
+ def __repr__(self):
238
+ if self.cuda_event:
239
+ return f"<torch.cuda.Event {self._as_parameter_.value:#x}>"
240
+ else:
241
+ return "<torch.cuda.Event uninitialized>"
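
A hedged sketch combining the Stream and Event wrappers above: run work on a side stream, order later work after it, and time the region with events.

    import torch

    x = torch.randn(1 << 20, device="cuda")

    side = torch.cuda.Stream()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    start.record()
    side.wait_stream(torch.cuda.current_stream())   # make the side stream see x
    with torch.cuda.stream(side):
        y = x * 2                                   # runs on the side stream
    torch.cuda.current_stream().wait_stream(side)   # order later work after it
    end.record()

    torch.cuda.synchronize()
    print(f"{start.elapsed_time(end):.3f} ms")
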
venv/lib/python3.10/site-packages/torch/func/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ from torch._functorch.eager_transforms import (
2
+ vjp,
3
+ jvp,
4
+ jacrev,
5
+ jacfwd,
6
+ hessian,
7
+ functionalize,
8
+ linearize
9
+ )
10
+ from torch._functorch.apis import grad, grad_and_value
11
+ from torch._functorch.functional_call import functional_call, stack_module_state
12
+ from torch._functorch.batch_norm_replacement import replace_all_batch_norm_modules_
13
+ from torch._functorch.apis import vmap
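
A short sketch of how the re-exported transforms compose; here vmap over grad yields per-sample gradients of a toy loss.

    import torch
    from torch.func import grad, vmap

    def loss(w, x):
        return (w * x).sum()

    w = torch.randn(3)
    xs = torch.randn(5, 3)

    per_sample_grads = vmap(grad(loss), in_dims=(None, 0))(w, xs)   # shape (5, 3)
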
venv/lib/python3.10/site-packages/torch/func/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (647 Bytes).
venv/lib/python3.10/site-packages/torch/include/torch/csrc/CudaIPCTypes.h ADDED
@@ -0,0 +1,143 @@
1
+ #pragma once
2
+ #ifdef USE_CUDA
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/cuda/CUDACachingAllocator.h>
5
+ #include <c10/cuda/CUDAException.h>
6
+ #include <c10/util/Logging.h>
7
+ #include <cuda_runtime_api.h>
8
+ #include <torch/csrc/Export.h>
9
+ #include <cstddef>
10
+ namespace torch {
11
+
12
+ TORCH_CUDA_CU_API bool CudaIPCCollect();
13
+
14
+ struct CudaIPCReceivedData final {
15
+ CudaIPCReceivedData() = default;
16
+ explicit CudaIPCReceivedData(std::shared_ptr<void> shared_ptr)
17
+ : shared_ptr_(std::move(shared_ptr)) {}
18
+ std::shared_ptr<void> shared_ptr_;
19
+ };
20
+
21
+ struct CudaIPCSentData final {
22
+ std::string handle_;
23
+ uint64_t offset_;
24
+ uint64_t* counter_ptr_; // Reference counter shared memory block
25
+ at::DataPtr original_ptr_; // Original mem allocation
26
+ cudaEvent_t event_; // Sync cuEventDestroy
27
+ bool event_sync_required_;
28
+ at::Device device_;
29
+
30
+ CudaIPCSentData(
31
+ std::string handle,
32
+ uint64_t offset,
33
+ uint64_t* counter_ptr,
34
+ at::Device device);
35
+ ~CudaIPCSentData();
36
+
37
+ uint64_t counter_value();
38
+ std::string handle() {
39
+ return handle_;
40
+ }
41
+ uint64_t offset() {
42
+ return offset_;
43
+ }
44
+ void set_original_ptr(at::DataPtr data_ptr) {
45
+ original_ptr_ = std::move(data_ptr);
46
+ }
47
+ };
48
+
49
+ TORCH_CUDA_CU_API at::DataPtr GetNewRefCountedSentData(
50
+ void* data,
51
+ at::Device device);
52
+
53
+ namespace {
54
+
55
+ inline constexpr int64_t CUDA_IPC_REF_COUNTER_FILE_SIZE = 10000;
56
+ inline constexpr int64_t CUDA_IPC_WARN_AFTER_X_BLOCKS_IN_LIMBO = 1000;
57
+ // This was determined empirically that CUDA (v10.1 and below) have the limit
58
+ // on the number of recorded blocking interprocess events. It is around ~22,000.
59
+ // And to give us leeway, we picked 1000 as it gives us enough events to share
60
+ // tensors effectively.
61
+ inline constexpr int64_t CUDA_IPC_MAXIMUM_EVENTS_TO_USE = 1000;
62
+
63
+ // All to be deleted data blocks with non zero reference counter goes there
64
+ struct CudaIPCSentDataLimbo final {
65
+ ~CudaIPCSentDataLimbo();
66
+ bool collect();
67
+ void add(std::unique_ptr<CudaIPCSentData> shared_block);
68
+ uint64_t size();
69
+
70
+ private:
71
+ // TODO: Can be changed to FIFO in order to avoid full traverse on every
72
+ // collect()
73
+ std::vector<std::unique_ptr<CudaIPCSentData>> shared_blocks_;
74
+ std::mutex limbo_mutex_;
75
+ };
76
+
77
+ struct CudaIPCRefCountersFile final {
78
+ CudaIPCRefCountersFile(
79
+ std::string handle,
80
+ uint64_t size,
81
+ at::DataPtr data_ptr)
82
+ : size_(size),
83
+
84
+ handle_(std::move(handle)),
85
+ refcounted_shared_mem_(std::move(data_ptr)) {}
86
+
87
+ uint64_t* counter_ptr() {
88
+ return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
89
+ }
90
+
91
+ void set_counter(uint64_t value) {
92
+ *counter_ptr() = value;
93
+ }
94
+
95
+ bool have_offsets() {
96
+ return next_offset_ < size_;
97
+ }
98
+
99
+ bool offsets_in_use() {
100
+ return used_slots_;
101
+ }
102
+
103
+ uint64_t get_offset() {
104
+ return next_offset_;
105
+ }
106
+
107
+ void rotate_offset() {
108
+ next_offset_++;
109
+ used_slots_++;
110
+ }
111
+
112
+ void return_offset(uint64_t offset /* unused */) {
113
+ used_slots_--;
114
+ }
115
+
116
+ std::string handle() {
117
+ return handle_;
118
+ }
119
+
120
+ private:
121
+ uint64_t next_offset_{0};
122
+ uint64_t size_;
123
+ uint64_t used_slots_{0};
124
+ std::string handle_;
125
+ at::DataPtr refcounted_shared_mem_;
126
+ };
127
+
128
+ } // namespace
129
+ } // namespace torch
130
+
131
+ namespace c10 {
132
+ namespace {
133
+ class CudaIPCCollectCallback : public FreeMemoryCallback {
134
+ public:
135
+ bool Execute() override {
136
+ return torch::CudaIPCCollect();
137
+ }
138
+ };
139
+ } // namespace
140
+
141
+ } // namespace c10
142
+
143
+ #endif
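For illustration only, a minimal sketch of how the CudaIPCRefCountersFile declared above could hand out a shared reference-counter slot; the ref_counters_file instance and the surrounding caller are assumptions, not part of this header.

    // Hypothetical caller; assumes an already-constructed CudaIPCRefCountersFile
    // named ref_counters_file (e.g. backed by a shared-memory at::DataPtr).
    if (ref_counters_file.have_offsets()) {
      uint64_t offset = ref_counters_file.get_offset();
      ref_counters_file.set_counter(1);   // initialize the refcount in shared memory
      ref_counters_file.rotate_offset();  // mark the slot as used, advance to the next one
      // ... later, once the corresponding sent block is finally released:
      ref_counters_file.return_offset(offset);
    }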
venv/lib/python3.10/site-packages/torch/include/torch/csrc/DataLoader.h ADDED
@@ -0,0 +1,6 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables,modernize-avoid-c-arrays)
+ extern PyMethodDef DataLoaderMethods[];
venv/lib/python3.10/site-packages/torch/include/torch/csrc/DynamicTypes.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ // Provides conversions between Python tensor objects and at::Tensor.
+
+ #include <torch/csrc/python_headers.h>
+
+ #include <ATen/Device.h>
+ #include <c10/core/Backend.h>
+ #include <c10/core/Layout.h>
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/ScalarTypeToTypeMeta.h>
+ #include <torch/csrc/Export.h>
+
+ #include <memory>
+ #include <string>
+
+ struct THPDtype;
+ struct THPLayout;
+
+ namespace c10 {
+ struct Storage;
+ }
+
+ namespace torch {
+ void registerDtypeObject(THPDtype* dtype, at::ScalarType scalarType);
+ void registerLayoutObject(THPLayout* thp_layout, at::Layout layout);
+
+ TORCH_PYTHON_API PyObject* createPyObject(const at::Storage& storage);
+ at::Storage createStorage(PyObject* obj);
+ std::tuple<at::Storage, at::ScalarType, bool> createStorageGetType(
+ PyObject* obj);
+ bool isStorage(PyObject* obj);
+
+ TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType);
+ THPLayout* getTHPLayout(at::Layout layout);
+ } // namespace torch
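As a rough usage sketch (not part of the header), the storage helpers above can round-trip a Python storage object; obj is an assumed PyObject* and error handling is omitted.

    // Assumes `obj` was obtained elsewhere and that the GIL is held.
    if (torch::isStorage(obj)) {
      at::Storage storage = torch::createStorage(obj);
      PyObject* py_storage = torch::createPyObject(storage);  // new reference
      Py_XDECREF(py_storage);
    }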
venv/lib/python3.10/site-packages/torch/include/torch/csrc/Export.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ #ifdef THP_BUILD_MAIN_LIB
+ #define TORCH_PYTHON_API C10_EXPORT
+ #else
+ #define TORCH_PYTHON_API C10_IMPORT
+ #endif
venv/lib/python3.10/site-packages/torch/include/torch/csrc/Layout.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ #include <torch/csrc/python_headers.h>
+
+ #include <ATen/Layout.h>
+
+ #include <string>
+
+ const int LAYOUT_NAME_LEN = 64;
+
+ struct THPLayout {
+ PyObject_HEAD at::Layout layout;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ char name[LAYOUT_NAME_LEN + 1];
+ };
+
+ extern PyTypeObject THPLayoutType;
+
+ inline bool THPLayout_Check(PyObject* obj) {
+ return Py_TYPE(obj) == &THPLayoutType;
+ }
+
+ PyObject* THPLayout_New(at::Layout layout, const std::string& name);
+
+ void THPLayout_init(PyObject* module);
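A short hedged sketch of how the type check above is intended to be used; obj is an assumed PyObject* coming from the Python side.

    // Assumes `obj` may or may not be a torch.layout object.
    if (THPLayout_Check(obj)) {
      at::Layout layout = reinterpret_cast<THPLayout*>(obj)->layout;
      (void)layout;  // e.g. branch on at::Layout::Strided vs. at::Layout::Sparse
    }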
venv/lib/python3.10/site-packages/torch/include/torch/csrc/THConcat.h ADDED
@@ -0,0 +1,19 @@
+ #pragma once
+
+ #define TH_CONCAT_STRING_2(x, y) TH_CONCAT_STRING_2_EXPAND(x, y)
+ #define TH_CONCAT_STRING_2_EXPAND(x, y) #x #y
+
+ #define TH_CONCAT_STRING_3(x, y, z) TH_CONCAT_STRING_3_EXPAND(x, y, z)
+ #define TH_CONCAT_STRING_3_EXPAND(x, y, z) #x #y #z
+
+ #define TH_CONCAT_STRING_4(x, y, z, w) TH_CONCAT_STRING_4_EXPAND(x, y, z, w)
+ #define TH_CONCAT_STRING_4_EXPAND(x, y, z, w) #x #y #z #w
+
+ #define TH_CONCAT_2(x, y) TH_CONCAT_2_EXPAND(x, y)
+ #define TH_CONCAT_2_EXPAND(x, y) x##y
+
+ #define TH_CONCAT_3(x, y, z) TH_CONCAT_3_EXPAND(x, y, z)
+ #define TH_CONCAT_3_EXPAND(x, y, z) x##y##z
+
+ #define TH_CONCAT_4_EXPAND(x, y, z, w) x##y##z##w
+ #define TH_CONCAT_4(x, y, z, w) TH_CONCAT_4_EXPAND(x, y, z, w)
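For clarity, the TH_CONCAT_STRING_* macros stringize their arguments while the plain TH_CONCAT_* macros paste tokens; the expansions below are illustrative.

    TH_CONCAT_STRING_2(TH, Tensor)  // expands to the string literal "THTensor"
    TH_CONCAT_3(TH, Float, Tensor)  // expands to the identifier THFloatTensor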
venv/lib/python3.10/site-packages/torch/include/torch/csrc/THP.h ADDED
@@ -0,0 +1,30 @@
+ #ifndef THP_H
+ #define THP_H
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/python_headers.h>
+
+ // Back-compatibility macros, Thanks to http://cx-oracle.sourceforge.net/
+ // define PyInt_* macros for Python 3.x. NB: We must include Python.h first,
+ // otherwise we'll incorrectly conclude PyInt_Check isn't defined!
+ #ifndef PyInt_Check
+ #define PyInt_Check PyLong_Check
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_Type PyLong_Type
+ #endif
+
+ #include <torch/csrc/Exceptions.h>
+ #include <torch/csrc/Generator.h>
+ #include <torch/csrc/Module.h>
+ #include <torch/csrc/Size.h>
+ #include <torch/csrc/Storage.h>
+ #include <torch/csrc/Types.h>
+ #include <torch/csrc/utils.h> // This requires defined Storage and Tensor types
+ #include <torch/csrc/utils/byte_order.h>
+
+ #include <torch/csrc/serialization.h>
+
+ #include <torch/csrc/autograd/python_autograd.h>
+
+ #endif
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h ADDED
@@ -0,0 +1,1101 @@
1
+ #pragma once
2
+
3
+ // NB: Must be at the top of file to avoid including the deprecated "math.h".
4
+ // https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio
5
+ #ifdef _MSC_VER
6
+ #ifndef _USE_MATH_DEFINES
7
+ #define _USE_MATH_DEFINES
8
+ #endif
9
+ #include <cmath>
10
+ #endif
11
+
12
+ #include <ATen/ATen.h>
13
+ #include <torch/csrc/autograd/generated/Functions.h>
14
+
15
+ namespace torch::autograd::generated::details {
16
+
17
+ extern const char* kCudnnDoubleBackwardMsg;
18
+
19
+ // A simple way to imperatively compute index ranges for slots
20
+ // that have been flattened
21
+ struct TORCH_API IndexRangeGenerator {
22
+ IndexRange range(size_t range_size) {
23
+ i += range_size;
24
+ return {i - range_size, i};
25
+ }
26
+ size_t size() {
27
+ return i;
28
+ }
29
+
30
+ private:
31
+ size_t i = 0;
32
+ };
33
+
34
+ TORCH_API Tensor toNonOptFwGrad(const c10::optional<Tensor>& t);
35
+ TORCH_API Tensor toNonOptPrimal(const c10::optional<Tensor>& t);
36
+ TORCH_API Tensor toNonOptTensor(const c10::optional<Tensor>& t);
37
+
38
+ TORCH_API inline c10::optional<Tensor> wrap_opt_if(
39
+ const Tensor& t,
40
+ const bool cond) {
41
+ using OptTensor = c10::optional<Tensor>;
42
+ return cond ? OptTensor(t) : static_cast<OptTensor>(c10::nullopt);
43
+ }
44
+
45
+ TORCH_API Tensor
46
+ apply_loss_reduction(const Tensor& unreduced, int64_t reduction);
47
+ TORCH_API bool any_variable_defined(const variable_list& variables);
48
+ TORCH_API void copy_range(
49
+ variable_list& out,
50
+ IndexRange range,
51
+ const at::Tensor& t);
52
+ TORCH_API void copy_range(
53
+ variable_list& out,
54
+ IndexRange range,
55
+ at::ArrayRef<at::Tensor> t);
56
+ TORCH_API at::Tensor copysign_tensor_self_backward(
57
+ const Tensor& grad,
58
+ const Tensor& self,
59
+ const Tensor& result);
60
+ TORCH_API at::Tensor not_implemented(const char* name, const char* reason = "");
61
+ TORCH_API std::vector<Tensor> not_implemented_list(
62
+ const char* name,
63
+ const char* reason = "");
64
+ at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result);
65
+ at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s);
66
+ int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim);
67
+ Tensor restore_reduced_dims(
68
+ const Tensor& output,
69
+ IntArrayRef dims,
70
+ bool keepdim);
71
+ Tensor scale_grad_by_count(
72
+ const Tensor& grad,
73
+ const Tensor& mask,
74
+ IntArrayRef dims);
75
+ at::Tensor norm_backward(
76
+ const at::Tensor& grad,
77
+ const at::Tensor& self,
78
+ const optional<at::Scalar>& p_,
79
+ const at::Tensor& norm);
80
+ at::Tensor norm_backward(
81
+ at::Tensor grad,
82
+ const at::Tensor& self,
83
+ const optional<at::Scalar>& p_,
84
+ at::Tensor norm,
85
+ at::IntArrayRef dim,
86
+ bool keepdim);
87
+ Tensor norm_jvp(
88
+ const Tensor& self_p,
89
+ const Tensor& self_t,
90
+ const optional<Scalar>& p_,
91
+ Tensor norm,
92
+ IntArrayRef dim,
93
+ bool keepdim);
94
+ Tensor norm_jvp(
95
+ const Tensor& grad,
96
+ const Tensor& self,
97
+ const optional<Scalar>& p_,
98
+ Tensor norm);
99
+ Tensor _nested_from_padded_backward(
100
+ const Tensor& grad,
101
+ const Tensor& input,
102
+ const bool do_transform_0213);
103
+ std::tuple<Tensor, Tensor, Tensor> linear_double_backward(
104
+ const variable_list& grads,
105
+ const Tensor& self,
106
+ const Tensor& grad_output,
107
+ const Tensor& weight);
108
+ Tensor linalg_vector_norm_jvp(
109
+ const Tensor& self_p,
110
+ const Tensor& self_t,
111
+ const Scalar& scalar_ord,
112
+ Tensor norm,
113
+ const at::OptionalIntArrayRef& opt_dim,
114
+ bool keepdim);
115
+ at::Tensor linalg_vector_norm_backward(
116
+ at::Tensor grad,
117
+ const at::Tensor& self,
118
+ const at::Scalar& ord,
119
+ at::Tensor norm,
120
+ const at::OptionalIntArrayRef& opt_dim,
121
+ bool keepdim);
122
+ at::Tensor pow_backward(
123
+ at::Tensor grad,
124
+ const at::Tensor& self,
125
+ const at::Scalar& exponent_);
126
+ at::Tensor pow_backward_self(
127
+ const at::Tensor& grad,
128
+ const at::Tensor& self,
129
+ const at::Tensor& exponent);
130
+ at::Tensor pow_backward_exponent(
131
+ const at::Tensor& grad,
132
+ const at::Tensor& self,
133
+ const at::Tensor& exponent,
134
+ const at::Tensor& result);
135
+ at::Tensor pow_backward_exponent(
136
+ const at::Tensor& grad,
137
+ const at::Scalar& base,
138
+ const at::Tensor& exponent,
139
+ const at::Tensor& result);
140
+ at::Tensor angle_backward(const at::Tensor& grad, const at::Tensor& self);
141
+ template <typename T>
142
+ at::Tensor mul_tensor_backward(const Tensor& grad, T other, ScalarType self_st);
143
+ template <typename T>
144
+ at::Tensor div_tensor_self_backward(
145
+ const Tensor& grad,
146
+ T other,
147
+ ScalarType self_st);
148
+ at::Tensor div_tensor_other_backward(
149
+ const Tensor& grad,
150
+ const Tensor& self,
151
+ const Tensor& other);
152
+ template <typename T>
153
+ at::Tensor div_tensor_self_backward(
154
+ const Tensor& grad,
155
+ T other,
156
+ ScalarType self_st,
157
+ const c10::optional<c10::string_view>& rounding_mode);
158
+ at::Tensor div_tensor_other_backward(
159
+ const Tensor& grad,
160
+ const Tensor& self,
161
+ const Tensor& other,
162
+ const c10::optional<c10::string_view>& rounding_mode);
163
+ at::Tensor mvlgamma_backward(
164
+ const at::Tensor& grad,
165
+ const at::Tensor& self,
166
+ int64_t p);
167
+ at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims);
168
+ at::Tensor rad2deg_backward(const at::Tensor& grad);
169
+ at::Tensor deg2rad_backward(const at::Tensor& grad);
170
+ at::Tensor unsqueeze_multiple(
171
+ const at::Tensor& t,
172
+ at::OptionalIntArrayRef opt_dim,
173
+ size_t n_dims);
174
+ at::Tensor sum_backward(
175
+ const at::Tensor& grad,
176
+ at::SymIntArrayRef sizes,
177
+ at::OptionalIntArrayRef opt_dims,
178
+ bool keepdim);
179
+ at::Tensor sum_backward(
180
+ const at::Tensor& grad,
181
+ c10::SymIntArrayRef sizes,
182
+ c10::IntArrayRef dims,
183
+ bool keepdim);
184
+ at::Tensor nansum_backward(
185
+ const at::Tensor& grad,
186
+ const at::Tensor& self,
187
+ at::OptionalIntArrayRef dims,
188
+ bool keepdim);
189
+ std::vector<int64_t> reverse_list(const at::IntArrayRef list);
190
+ std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
191
+ at::Tensor reverse_dim(const at::Tensor& t, int64_t dim);
192
+ at::Tensor prod_safe_zeros_backward(
193
+ const at::Tensor& grad,
194
+ const at::Tensor& inp,
195
+ int64_t dim);
196
+ at::Tensor prod_backward(
197
+ const at::Tensor& grad,
198
+ const at::Tensor& input,
199
+ const at::Tensor& result);
200
+ at::Tensor prod_backward(
201
+ at::Tensor grad,
202
+ const at::Tensor& input,
203
+ at::Tensor result,
204
+ int64_t dim,
205
+ bool keepdim);
206
+ at::Tensor solve_jvp(
207
+ const Tensor& X,
208
+ const Tensor& A,
209
+ const Tensor& dA,
210
+ const Tensor& dB);
211
+ at::Tensor solve_backward_self(
212
+ const at::Tensor& grad,
213
+ const at::Tensor& self,
214
+ const at::Tensor& A);
215
+ at::Tensor solve_backward_A(
216
+ const at::Tensor& grad,
217
+ const at::Tensor& self,
218
+ const at::Tensor& A,
219
+ const at::Tensor& solution);
220
+ at::Tensor cumsum_backward(const at::Tensor& grad, int64_t dim);
221
+ at::Tensor logsumexp_backward(
222
+ at::Tensor grad,
223
+ const at::Tensor& self,
224
+ at::Tensor result,
225
+ at::IntArrayRef dim,
226
+ bool keepdim);
227
+ at::Tensor logsumexp_jvp(
228
+ const at::Tensor& self_p,
229
+ const at::Tensor& self_t,
230
+ IntArrayRef dim,
231
+ bool keepdim);
232
+ at::Tensor logcumsumexp_backward(
233
+ at::Tensor grad,
234
+ const at::Tensor& self,
235
+ at::Tensor result,
236
+ int64_t dim);
237
+ at::Tensor logcumsumexp_jvp(
238
+ const at::Tensor& self_p,
239
+ const at::Tensor& self_t,
240
+ int64_t dim);
241
+ at::Tensor unbind_backward(const variable_list& grads, int64_t dim);
242
+ at::Tensor unbind_backward_nested(
243
+ const variable_list& grads,
244
+ const Tensor& nt_sizes,
245
+ int64_t dim,
246
+ const at::TensorOptions& options);
247
+ at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes);
248
+ at::Tensor unsqueeze_to(
249
+ const at::Tensor& self,
250
+ int64_t dim,
251
+ c10::SymIntArrayRef sym_sizes);
252
+ at::Tensor unsqueeze_to(
253
+ const at::Tensor& self,
254
+ IntArrayRef dim,
255
+ c10::SymIntArrayRef sym_sizes);
256
+ std::vector<at::Tensor> cat_tensors_backward(
257
+ const at::Tensor& grad,
258
+ const std::vector<std::vector<c10::SymInt>>& sizes,
259
+ const std::vector<ScalarType>& dtypes,
260
+ int64_t dim);
261
+ std::vector<at::Tensor> stack_tensors_backward(
262
+ const at::Tensor& grad,
263
+ int64_t dim,
264
+ const std::vector<ScalarType>& dtypes);
265
+ std::vector<at::Tensor> block_diag_backward(
266
+ const at::Tensor& grad,
267
+ const std::vector<std::vector<int64_t>>& sizes,
268
+ const std::vector<ScalarType>& dtypes);
269
+ at::Tensor clamp_backward(
270
+ const at::Tensor& grad,
271
+ const at::Tensor& self,
272
+ const optional<at::Scalar>& min,
273
+ const optional<at::Scalar>& max);
274
+ at::Tensor clamp_backward(
275
+ const at::Tensor& grad,
276
+ const at::Tensor& self,
277
+ const at::Tensor& min,
278
+ const at::Tensor& max);
279
+ std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max(
280
+ const at::Tensor& grad,
281
+ const at::Tensor& self,
282
+ const at::Tensor& min,
283
+ const at::Tensor& max,
284
+ const std::array<bool, 2>&);
285
+ at::Tensor clamp_jvp(
286
+ const Tensor& self_p,
287
+ const Tensor& self_t,
288
+ const Tensor& min_p,
289
+ const Tensor& min_t,
290
+ const Tensor& max_p,
291
+ const Tensor& max_t);
292
+ at::SymIntArrayRef strides_or_error(
293
+ const Tensor& input,
294
+ c10::string_view const& input_name);
295
+ at::Tensor mm_mat1_backward(
296
+ const Tensor& grad,
297
+ const Tensor& mat2,
298
+ at::SymIntArrayRef mat1_sizes,
299
+ at::SymIntArrayRef mat1_strides,
300
+ c10::Layout mat1_layout,
301
+ const Scalar& alpha);
302
+ at::Tensor mm_mat2_backward(
303
+ const at::Tensor& grad,
304
+ const at::Tensor& mat1,
305
+ at::SymIntArrayRef sizes,
306
+ at::SymIntArrayRef strides,
307
+ c10::Layout layout,
308
+ const at::Scalar& alpha);
309
+ at::Tensor mm_mat1_sparse_backward(
310
+ const at::Tensor& grad,
311
+ const at::Tensor& mat1,
312
+ const at::Tensor& mat2,
313
+ const at::Scalar& alpha);
314
+ std::tuple<Tensor, Tensor, Tensor> sparse_sampled_addmm_backward(
315
+ const Tensor& grad,
316
+ const Tensor& self,
317
+ const c10::optional<Tensor>& mat1,
318
+ const c10::optional<Tensor>& mat2,
319
+ const Scalar& alpha,
320
+ const Scalar& beta,
321
+ const std::array<bool, 3>& grad_input_mask);
322
+ at::Tensor sparse_mask_backward(
323
+ const at::Tensor& grad,
324
+ const at::Tensor& mask,
325
+ c10::Layout self_layout);
326
+ at::Tensor sparse_sparse_matmul_backward(
327
+ const at::Tensor& grad,
328
+ const at::Tensor& mat1,
329
+ const at::Tensor& mat2,
330
+ int64_t grad_order);
331
+ at::Tensor renorm_backward(
332
+ const at::Tensor& grad,
333
+ const at::Tensor& self,
334
+ const at::Scalar& p,
335
+ int64_t dim,
336
+ const at::Scalar& maxnorm);
337
+ at::Tensor renorm_jvp(
338
+ const at::Tensor& self_p,
339
+ const at::Tensor& self_t,
340
+ const at::Scalar& p,
341
+ int64_t dim,
342
+ const at::Scalar& maxnorm);
343
+ at::Tensor repeat_backward(
344
+ at::Tensor grad,
345
+ at::SymIntArrayRef repeats,
346
+ at::SymIntArrayRef input_shape);
347
+ at::Tensor _fused_dropout_backward(
348
+ const at::Tensor& grad,
349
+ const at::Tensor& mask,
350
+ double p1m);
351
+ at::Tensor infinitely_differentiable_native_dropout_backward(
352
+ const at::Tensor& grad,
353
+ const at::Tensor& mask,
354
+ double scale);
355
+ at::Tensor native_dropout_double_backward(
356
+ const at::Tensor& ggI,
357
+ const at::Tensor& grad,
358
+ const at::Tensor& mask,
359
+ double scale);
360
+ at::Tensor evenly_distribute_backward(
361
+ const at::Tensor& grad,
362
+ const at::Tensor& input,
363
+ const at::Tensor& value);
364
+ Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn);
365
+ Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask);
366
+ at::Tensor var_backward(
367
+ at::Tensor grad,
368
+ const at::Tensor& self,
369
+ at::OptionalIntArrayRef dim,
370
+ const c10::optional<c10::Scalar>& correction,
371
+ bool keepdim);
372
+ at::Tensor var_jvp(
373
+ const at::Tensor& self_t,
374
+ const at::Tensor& self_p,
375
+ const at::Tensor& result,
376
+ at::OptionalIntArrayRef dim_opt,
377
+ const c10::optional<c10::Scalar>& correction,
378
+ bool keepdim);
379
+ at::Tensor std_backward(
380
+ const at::Tensor& result,
381
+ const at::Tensor& grad,
382
+ const at::Tensor& self,
383
+ at::OptionalIntArrayRef dim,
384
+ const c10::optional<c10::Scalar>& correction,
385
+ bool keepdim);
386
+ Tensor mean_backward(
387
+ const Tensor& grad,
388
+ c10::SymIntArrayRef shape,
389
+ at::OptionalIntArrayRef opt_dim,
390
+ c10::SymInt numel,
391
+ bool keepdim);
392
+ Tensor var_mean_backward(
393
+ const Tensor& gvar,
394
+ const Tensor& gmean,
395
+ const Tensor& self,
396
+ at::OptionalIntArrayRef dim_opt,
397
+ const c10::optional<c10::Scalar>& correction,
398
+ bool keepdim);
399
+ Tensor std_mean_backward(
400
+ const Tensor& gstd,
401
+ const Tensor& gmean,
402
+ const Tensor& self,
403
+ const Tensor& std,
404
+ at::OptionalIntArrayRef dim_opt,
405
+ const c10::optional<c10::Scalar>& correction,
406
+ bool keepdim);
407
+ at::Tensor cholesky_backward(
408
+ const at::Tensor& grad,
409
+ bool upper,
410
+ const at::Tensor& L);
411
+ at::Tensor cholesky_jvp(
412
+ const at::Tensor& input_tangent,
413
+ const at::Tensor& L,
414
+ bool upper);
415
+ at::Tensor cholesky_inverse_backward(
416
+ const at::Tensor& grad,
417
+ const at::Tensor& L,
418
+ bool upper,
419
+ const at::Tensor& inverse);
420
+ at::Tensor cholesky_inverse_jvp(
421
+ const at::Tensor& F,
422
+ const at::Tensor& dF,
423
+ const at::Tensor& X,
424
+ bool upper);
425
+ Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA);
426
+ Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A);
427
+ at::Tensor split_with_sizes_backward(
428
+ const std::vector<torch::autograd::Variable>& grads,
429
+ c10::SymIntArrayRef split_sizes,
430
+ int64_t dim,
431
+ c10::SymIntArrayRef sizes,
432
+ const at::TensorOptions& options);
433
+ at::Tensor _nested_split_with_sizes_backward(
434
+ const std::vector<torch::autograd::Variable>& grads,
435
+ c10::SymIntArrayRef split_sizes,
436
+ int64_t dim,
437
+ const Tensor& nt_sizes,
438
+ const at::TensorOptions& options);
439
+ at::Tensor split_backward(
440
+ const std::vector<torch::autograd::Variable>& grads,
441
+ const c10::SymInt& split_size,
442
+ int64_t dim,
443
+ c10::SymIntArrayRef sizes,
444
+ const at::TensorOptions& options);
445
+ at::Tensor max_pool_double_backward(
446
+ const at::Tensor& grad,
447
+ const at::Tensor& indices,
448
+ int dim);
449
+ at::Tensor error_for_max_pool2d_double_backward();
450
+ at::Tensor glu_double_backward(
451
+ const at::Tensor& grad,
452
+ const at::Tensor& grad_output,
453
+ const at::Tensor& input,
454
+ int64_t dim);
455
+ at::Tensor glu_double_backward_grad_output(
456
+ const at::Tensor& grad,
457
+ const at::Tensor& input,
458
+ int64_t dim);
459
+ at::Tensor infinitely_differentiable_silu_backward(
460
+ const at::Tensor& grad_output,
461
+ const at::Tensor& input);
462
+ at::Tensor infinitely_differentiable_mish_backward(
463
+ const at::Tensor& grad_output,
464
+ const at::Tensor& input);
465
+ Tensor infinitely_differentiable_logit_backward(
466
+ const Tensor& grad,
467
+ const Tensor& self,
468
+ c10::optional<double> eps);
469
+ Tensor binary_cross_entropy_target_backward(
470
+ const Tensor& grad,
471
+ const Tensor& self,
472
+ const Tensor& target,
473
+ const c10::optional<Tensor>& weight,
474
+ int64_t reduction);
475
+ Tensor binary_cross_entropy_double_backward_target(
476
+ const Tensor& grad,
477
+ const Tensor& grad_output,
478
+ const Tensor& self,
479
+ const Tensor& target,
480
+ const c10::optional<Tensor>& weight,
481
+ int64_t reduction);
482
+ Tensor binary_cross_entropy_with_logits_backward(
483
+ const Tensor& grad,
484
+ const Tensor& input,
485
+ const Tensor& target,
486
+ const c10::optional<Tensor>& weight_opt,
487
+ const c10::optional<Tensor>& pos_weight_opt,
488
+ int64_t reduction);
489
+ at::Tensor binary_cross_entropy_with_logits_target_backward(
490
+ const at::Tensor& grad_output,
491
+ const at::Tensor& self,
492
+ const at::Tensor& target,
493
+ const c10::optional<at::Tensor>& weight,
494
+ const c10::optional<at::Tensor>& pos_weight,
495
+ int64_t reduction);
496
+ at::Tensor log_sigmoid_double_backward(
497
+ const at::Tensor& grad,
498
+ const at::Tensor& input);
499
+ at::Tensor softmax_double_backward(
500
+ const at::Tensor& grad,
501
+ const at::Tensor& grad_output,
502
+ int dim,
503
+ const at::Tensor& output);
504
+ at::Tensor binary_cross_entropy_double_backward(
505
+ const at::Tensor& grad_output,
506
+ const at::Tensor& grad,
507
+ const at::Tensor& input,
508
+ const at::Tensor& target,
509
+ const c10::optional<at::Tensor>& weight,
510
+ int64_t reduction);
511
+ at::Tensor binary_cross_entropy_double_backward_grad_output(
512
+ const at::Tensor& grad,
513
+ const at::Tensor& input,
514
+ const at::Tensor& target,
515
+ const c10::optional<at::Tensor>& weight,
516
+ int64_t reduction);
517
+ at::Tensor smooth_l1_loss_double_backward(
518
+ const at::Tensor& grad,
519
+ const at::Tensor& input,
520
+ const at::Tensor& target,
521
+ int64_t reduction,
522
+ double beta);
523
+ at::Tensor huber_loss_double_backward(
524
+ const at::Tensor& grad,
525
+ const at::Tensor& input,
526
+ const at::Tensor& target,
527
+ int64_t reduction,
528
+ double delta);
529
+ at::Tensor huber_loss_double_backward_grad_output(
530
+ const at::Tensor& grad,
531
+ const at::Tensor& grad_output,
532
+ const at::Tensor& input,
533
+ const at::Tensor& target,
534
+ int64_t reduction,
535
+ double delta);
536
+ at::Tensor mse_loss_double_backward(
537
+ const at::Tensor& grad,
538
+ const at::Tensor& input,
539
+ int64_t reduction);
540
+ at::Tensor soft_margin_loss_double_backward(
541
+ const at::Tensor& grad,
542
+ const at::Tensor& input,
543
+ const at::Tensor& target,
544
+ int64_t reduction);
545
+ at::Tensor soft_margin_loss_double_backward_grad_output(
546
+ const at::Tensor& grad,
547
+ const at::Tensor& grad_output,
548
+ const at::Tensor& input,
549
+ const at::Tensor& target,
550
+ int64_t reduction);
551
+ at::Tensor softplus_double_backward(
552
+ const at::Tensor& grad,
553
+ const at::Tensor& input,
554
+ const at::Scalar& beta,
555
+ const at::Scalar& threshold);
556
+ std::tuple<at::Tensor, at::Tensor> slogdet_jvp(
557
+ const at::Tensor& LU,
558
+ const at::Tensor& pivots,
559
+ const at::Tensor& dA,
560
+ const at::Tensor& sign,
561
+ const bool use_A_T);
562
+ at::Tensor slogdet_backward(
563
+ const at::Tensor& grad_sign,
564
+ const at::Tensor& grad_logabsdet,
565
+ const at::Tensor& A,
566
+ const at::Tensor& signdet,
567
+ const at::Tensor& LU,
568
+ const at::Tensor& pivots);
569
+ at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self);
570
+ at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self);
571
+ at::Tensor sparse_constructor_values_backward(
572
+ const at::Tensor& sparse_grad_out,
573
+ const at::Tensor& indices);
574
+ at::Tensor embedding_dense_double_backward_symint(
575
+ const at::Tensor& grad,
576
+ const at::Tensor& indices,
577
+ const c10::SymInt& padding_idx);
578
+ at::Tensor index_backward(
579
+ at::Tensor zeros_like_self,
580
+ const torch::List<c10::optional<Tensor>>& indices,
581
+ const at::Tensor& grad);
582
+ at::Tensor _cudnn_ctc_loss_backward(
583
+ const at::Tensor& grad_out,
584
+ const at::Tensor& loss,
585
+ const at::Tensor& raw_grad,
586
+ bool zero_infinity);
587
+ at::Tensor elu_double_backward(
588
+ const Tensor& grad,
589
+ const Tensor& grad_output,
590
+ const Scalar& alpha,
591
+ const Scalar& scale,
592
+ const Scalar& input_scale,
593
+ bool is_result,
594
+ const Tensor& self_or_result);
595
+
596
+ Tensor svd_backward(
597
+ const Tensor& gU,
598
+ const Tensor& gS,
599
+ const Tensor& gVh,
600
+ const Tensor& U,
601
+ const Tensor& S,
602
+ const Tensor& Vh);
603
+
604
+ std::tuple<Tensor, Tensor, Tensor> linalg_svd_jvp(
605
+ const Tensor& dA,
606
+ const Tensor& U,
607
+ const Tensor& S,
608
+ const Tensor& Vh,
609
+ const bool full_matrices);
610
+ Tensor slice_backward_wrapper(
611
+ const at::Tensor& grad,
612
+ const c10::SymIntArrayRef& input_sizes,
613
+ int64_t dim,
614
+ c10::optional<c10::SymInt> start,
615
+ c10::optional<c10::SymInt> end,
616
+ c10::SymInt step);
617
+ std::tuple<Tensor, Tensor> linalg_eig_jvp(
618
+ const Tensor& dA,
619
+ const Tensor& L,
620
+ const Tensor& V,
621
+ const bool is_hermitian);
622
+ Tensor linalg_eig_backward(
623
+ const Tensor& gL,
624
+ const Tensor& gV,
625
+ const Tensor& L,
626
+ const Tensor& V,
627
+ const bool is_hermitian,
628
+ const bool symeig_eigenvectors = true);
629
+ Tensor linalg_lstsq_jvp(
630
+ const Tensor& A,
631
+ const Tensor& B,
632
+ const Tensor& dA,
633
+ const Tensor& dB);
634
+ std::tuple<Tensor, Tensor> triangular_solve_backward(
635
+ const Tensor& grad_x,
636
+ const Tensor& grad_m,
637
+ const Tensor& b,
638
+ const Tensor& a,
639
+ const Tensor& x,
640
+ const bool upper,
641
+ const bool transpose,
642
+ const bool unitriangular,
643
+ std::array<bool, 2> output_mask);
644
+ Tensor triangular_solve_jvp(
645
+ const Tensor& X,
646
+ const Tensor& A,
647
+ const Tensor& dA,
648
+ const Tensor& dB,
649
+ const bool upper,
650
+ const bool transpose,
651
+ const bool unitriangular);
652
+ Tensor linalg_solve_triangular_forward_AD(
653
+ const Tensor& A_t,
654
+ const Tensor& B_t,
655
+ const Tensor& A,
656
+ const Tensor& X,
657
+ const bool upper,
658
+ const bool left,
659
+ const bool unitriangular);
660
+ std::tuple<Tensor, Tensor> linalg_solve_triangular_backward(
661
+ const Tensor& grad,
662
+ const Tensor& A,
663
+ const Tensor& X,
664
+ const bool upper,
665
+ const bool left,
666
+ const bool unitriangular,
667
+ std::array<bool, 2> output_mask);
668
+ std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(
669
+ const Tensor& grad_out,
670
+ const c10::optional<Tensor>& i1,
671
+ const c10::optional<Tensor>& i2,
672
+ const c10::optional<Tensor>& i3,
673
+ IntArrayRef expand1,
674
+ IntArrayRef expand2,
675
+ IntArrayRef expand3,
676
+ IntArrayRef sumdim,
677
+ std::array<bool, 3> grad_mask);
678
+ std::tuple<Tensor, Tensor> linalg_qr_jvp(
679
+ const Tensor& dA,
680
+ const Tensor& Q,
681
+ const Tensor& R,
682
+ const c10::string_view mode);
683
+ Tensor linalg_qr_backward(
684
+ const Tensor& gQ,
685
+ const Tensor& gR,
686
+ const Tensor& Q,
687
+ const Tensor& R,
688
+ const c10::string_view mode);
689
+ Tensor linalg_matrix_exp_differential(
690
+ const Tensor& self,
691
+ const Tensor& grad,
692
+ bool adjoint);
693
+ std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward(
694
+ const Tensor& input,
695
+ const c10::optional<Tensor>& gamma,
696
+ const Tensor& ggI,
697
+ const Tensor& ggG,
698
+ const Tensor& ggB,
699
+ const Tensor& gO,
700
+ const c10::optional<Tensor>& running_mean,
701
+ const c10::optional<Tensor>& running_var,
702
+ bool training,
703
+ double eps,
704
+ const c10::optional<Tensor>& save_mean,
705
+ const c10::optional<Tensor>& save_invstd,
706
+ std::array<bool, 3> output_mask);
707
+ std::tuple<Tensor, Tensor> _euclidean_dist_backward(
708
+ const Tensor& grad,
709
+ const Tensor& x1,
710
+ const Tensor& x2,
711
+ const Tensor& res);
712
+ Tensor fft_backward(
713
+ const Tensor& self,
714
+ const Tensor& grad,
715
+ int64_t signal_ndim,
716
+ bool complex_input,
717
+ bool complex_output,
718
+ bool inverse,
719
+ IntArrayRef checked_signal_sizes,
720
+ int64_t normalization,
721
+ bool onesided,
722
+ IntArrayRef output_sizes);
723
+ Tensor fft_r2c_backward(
724
+ const Tensor& grad,
725
+ at::IntArrayRef dim,
726
+ int64_t normalization,
727
+ bool onesided,
728
+ const c10::SymInt& last_dim_size);
729
+ Tensor fft_c2r_backward(
730
+ const Tensor& grad,
731
+ IntArrayRef dim,
732
+ int64_t normalization);
733
+ Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad);
734
+ std::tuple<Tensor, Tensor> cholesky_solve_backward(
735
+ const Tensor& grad_x,
736
+ const Tensor& self,
737
+ const Tensor& input2,
738
+ const Tensor& result,
739
+ const bool upper,
740
+ std::array<bool, 2> output_mask);
741
+ Tensor cholesky_solve_jvp(
742
+ const Tensor& X,
743
+ const Tensor& U,
744
+ const Tensor& dU,
745
+ const Tensor& dB,
746
+ const bool upper);
747
+ std::tuple<Tensor, Tensor, Tensor>
748
+ infinitely_differentiable_native_group_norm_backward(
749
+ const Tensor& dY,
750
+ const Tensor& dmean,
751
+ const Tensor& drstd,
752
+ const Tensor& X,
753
+ const Tensor& mean,
754
+ const Tensor& rstd,
755
+ const c10::optional<Tensor>& gamma,
756
+ c10::SymInt N,
757
+ const c10::SymInt& C,
758
+ c10::SymInt HxW,
759
+ int64_t group,
760
+ double eps,
761
+ std::array<bool, 3> grad_input_mask);
762
+ Tensor gelu_double_backward(
763
+ const Tensor& ggI,
764
+ const Tensor& gO,
765
+ const Tensor& input,
766
+ c10::string_view approximate);
767
+ Tensor as_strided_backward(
768
+ Tensor grad,
769
+ const TensorGeometry& input_geometry,
770
+ c10::SymIntArrayRef sizes,
771
+ c10::SymIntArrayRef strides,
772
+ const optional<c10::SymInt>& storage_offset_);
773
+ Tensor as_strided_scatter_backward(
774
+ const Tensor& grad,
775
+ const TensorGeometry& input_geometry,
776
+ const TensorGeometry& src_geometry,
777
+ c10::SymIntArrayRef sizes,
778
+ c10::SymIntArrayRef strides,
779
+ optional<c10::SymInt> storage_offset);
780
+ std::tuple<Tensor, Tensor> atan2_backward(
781
+ const Tensor& grad,
782
+ const Tensor& self,
783
+ const Tensor& other,
784
+ std::array<bool, 2> output_mask);
785
+ Tensor amaxamin_jvp(
786
+ const Tensor& x,
787
+ const Tensor& dx,
788
+ const Tensor& result,
789
+ IntArrayRef dim,
790
+ bool keepdim);
791
+ std::tuple<Tensor, Tensor, Tensor> layer_norm_double_backward(
792
+ const Tensor& input,
793
+ const c10::optional<Tensor>& gamma,
794
+ const Tensor& ggI,
795
+ const Tensor& ggG,
796
+ const Tensor& ggB,
797
+ const Tensor& gO,
798
+ const Tensor& save_mean,
799
+ const Tensor& save_invstd,
800
+ c10::SymIntArrayRef normalized_shape,
801
+ std::array<bool, 3> output_mask);
802
+
803
+ std::tuple<Tensor, Tensor> householder_product_backward(
804
+ const Tensor& grad,
805
+ const Tensor& result,
806
+ const Tensor& input,
807
+ const Tensor& tau,
808
+ const bool flip_order = false);
809
+ Tensor householder_product_jvp(
810
+ const Tensor& dV,
811
+ const Tensor& dtau,
812
+ const Tensor& prod,
813
+ const Tensor& V,
814
+ const Tensor& tau);
815
+ std::tuple<Tensor, Tensor, Tensor> ormqr_backward(
816
+ const Tensor& grad,
817
+ const Tensor& result,
818
+ const Tensor& self,
819
+ const Tensor& tau,
820
+ const Tensor& other,
821
+ bool left,
822
+ bool transpose,
823
+ std::array<bool, 3> grad_output_mask);
824
+ std::tuple<Tensor, Tensor> polar_backward(
825
+ const Tensor& grad,
826
+ const Tensor& result);
827
+ Tensor i1_backward(
828
+ const Tensor& grad,
829
+ const Tensor& self,
830
+ const Tensor& result);
831
+ Tensor i1e_backward(
832
+ const Tensor& grad,
833
+ const Tensor& self,
834
+ const Tensor& result);
835
+ Tensor linalg_lu_solve_LU(
836
+ const Tensor& grad,
837
+ const Tensor& LU,
838
+ const Tensor& pivots,
839
+ const Tensor& X,
840
+ const bool left,
841
+ const bool adjoint);
842
+ Tensor linalg_lu_solve_jvp(
843
+ const Tensor& X,
844
+ const Tensor& LU,
845
+ const Tensor& pivots,
846
+ const Tensor& dLU,
847
+ const Tensor& dB,
848
+ const bool left,
849
+ const bool adjoint);
850
+ std::tuple<Tensor, Tensor> linalg_solve_backward(
851
+ const Tensor& gX,
852
+ const Tensor& X,
853
+ const Tensor& A,
854
+ const Tensor& LU,
855
+ const Tensor& pivots,
856
+ const bool left,
857
+ const bool B_requires_grad);
858
+ Tensor linalg_solve_jvp(
859
+ const Tensor& dA,
860
+ const Tensor& dB,
861
+ const Tensor& X,
862
+ const Tensor& LU,
863
+ const Tensor& pivots,
864
+ const bool left,
865
+ const bool use_A_T);
866
+ Tensor lu_unpack_backward(
867
+ const Tensor& L_grad,
868
+ const Tensor& U_grad,
869
+ const c10::SymInt& m,
870
+ const c10::SymInt& n);
871
+
872
+ Tensor linalg_det_backward(
873
+ const Tensor& grad,
874
+ const Tensor& det,
875
+ const Tensor& A,
876
+ const Tensor& LU,
877
+ const Tensor& pivots);
878
+ Tensor linalg_det_jvp(
879
+ const Tensor& dA,
880
+ const Tensor& det,
881
+ const Tensor& LU,
882
+ const Tensor& pivots,
883
+ const bool use_A_T);
884
+ std::tuple<Tensor, Tensor> linalg_lstsq_backward(
885
+ const Tensor& grad,
886
+ const Tensor& A,
887
+ const Tensor& B_,
888
+ const std::array<bool, 2>& grad_input_mask);
889
+ Tensor linalg_lu_backward(
890
+ const Tensor& L_grad,
891
+ const Tensor& U_grad,
892
+ const Tensor& P,
893
+ const Tensor& L,
894
+ const Tensor& U,
895
+ const bool pivot);
896
+
897
+ std::tuple<Tensor, Tensor> linalg_lu_jvp(
898
+ const Tensor& dA,
899
+ const Tensor& P,
900
+ const Tensor& L,
901
+ const Tensor& U,
902
+ const bool pivot);
903
+
904
+ Tensor lu_factor_ex_backward(
905
+ const Tensor& grad,
906
+ const Tensor& LU,
907
+ const Tensor& pivs,
908
+ const bool pivot);
909
+ Tensor lu_factor_ex_jvp(
910
+ const Tensor& dX,
911
+ const Tensor& LU,
912
+ const Tensor& pivs,
913
+ const bool pivot);
914
+
915
+ Tensor batch_norm_jvp(
916
+ const Tensor& input_p,
917
+ const Tensor& input_t,
918
+ const Tensor& weight_p,
919
+ const Tensor& weight_t,
920
+ const Tensor& bias_p,
921
+ const Tensor& bias_t,
922
+ const c10::optional<Tensor>& running_mean,
923
+ const c10::optional<Tensor>& running_var,
924
+ const Tensor& saved_mean,
925
+ const Tensor& saved_invstd,
926
+ bool train,
927
+ double eps);
928
+
929
+ Tensor layer_norm_jvp(
930
+ const Tensor& input_p,
931
+ const Tensor& input_t,
932
+ const Tensor& weight_p,
933
+ const Tensor& weight_t,
934
+ const Tensor& bias_p,
935
+ const Tensor& bias_t,
936
+ const Tensor& saved_mean,
937
+ const Tensor& saved_invstd,
938
+ c10::SymIntArrayRef normalized_shape);
939
+
940
+ Tensor group_norm_jvp(
941
+ const Tensor& input_p,
942
+ const Tensor& input_t,
943
+ const Tensor& weight_p,
944
+ const Tensor& weight_t,
945
+ const Tensor& bias_p,
946
+ const Tensor& bias_t,
947
+ const Tensor& saved_mean,
948
+ const Tensor& saved_invstd,
949
+ int64_t groups);
950
+ Tensor group_norm_mean_jvp(
951
+ const Tensor& input_t,
952
+ const Tensor& mean_p,
953
+ int64_t groups);
954
+ Tensor group_norm_invstd_jvp(
955
+ const Tensor& input_p,
956
+ const Tensor& input_t,
957
+ const Tensor& mean_p,
958
+ const Tensor& invstd_p,
959
+ int64_t groups);
960
+
961
+ Tensor convolution_jvp(
962
+ const Tensor& input_p,
963
+ const Tensor& input_t,
964
+ const Tensor& weight_p,
965
+ const Tensor& weight_t,
966
+ const Tensor& bias_p,
967
+ const Tensor& bias_t,
968
+ at::SymIntArrayRef stride,
969
+ at::SymIntArrayRef padding,
970
+ at::SymIntArrayRef dilation,
971
+ bool transposed,
972
+ at::SymIntArrayRef output_padding,
973
+ const c10::SymInt& groups);
974
+
975
+ Tensor _convolution_jvp(
976
+ const Tensor& input_p,
977
+ const Tensor& input_t,
978
+ const Tensor& weight_p,
979
+ const Tensor& weight_t,
980
+ const Tensor& bias_p,
981
+ const Tensor& bias_t,
982
+ at::SymIntArrayRef stride,
983
+ at::SymIntArrayRef padding,
984
+ at::SymIntArrayRef dilation,
985
+ bool transposed,
986
+ at::SymIntArrayRef output_padding,
987
+ const c10::SymInt& groups,
988
+ bool benchmark,
989
+ bool deterministic,
990
+ bool cudnn_enabled,
991
+ bool allow_tf32);
992
+
993
+ Tensor convolution_backward_jvp_grad_bias(
994
+ const Tensor& grad_out_t,
995
+ const Tensor& grad_bias);
996
+
997
+ Tensor cat_jvp(const at::ITensorListRef& tensors, int64_t dim);
998
+ Tensor block_diag_jvp(at::TensorList tensors);
999
+ Tensor stack_jvp(at::TensorList tensors, int64_t dim);
1000
+ Tensor cumprod_jvp(
1001
+ const Tensor& self_t,
1002
+ const Tensor& self_p,
1003
+ const Tensor& result,
1004
+ int dim);
1005
+ Tensor gather_with_keepdimed_indices(
1006
+ const Tensor& input,
1007
+ int64_t dim,
1008
+ const Tensor& indices,
1009
+ bool keepdim);
1010
+ Tensor evenly_read_jvp(
1011
+ const Tensor& fw_grad,
1012
+ const Tensor& input,
1013
+ const Tensor& value);
1014
+ Tensor warn_backwards(const Tensor& grad_output);
1015
+
1016
+ std::tuple<Tensor, Tensor> _cudnn_convolution_backward(
1017
+ const at::Tensor& self,
1018
+ const at::Tensor& grad_output,
1019
+ const at::Tensor& weight,
1020
+ at::SymIntArrayRef padding,
1021
+ at::SymIntArrayRef output_padding,
1022
+ at::SymIntArrayRef stride,
1023
+ at::SymIntArrayRef dilation,
1024
+ bool transposed,
1025
+ c10::SymInt groups,
1026
+ ::std::array<bool, 2> output_mask);
1027
+
1028
+ Tensor scatter_reduce_jvp(
1029
+ const Tensor& self_p,
1030
+ const Tensor& self_t,
1031
+ int dim,
1032
+ const Tensor& index,
1033
+ const Tensor& src_p,
1034
+ const Tensor& src_t,
1035
+ c10::string_view reduce,
1036
+ bool include_self,
1037
+ const Tensor& result);
1038
+
1039
+ std::tuple<Tensor, Tensor> scatter_reduce_backward(
1040
+ const Tensor& grad,
1041
+ const Tensor& self,
1042
+ int dim,
1043
+ const Tensor& index,
1044
+ const Tensor& src,
1045
+ c10::string_view reduce,
1046
+ bool include_self,
1047
+ const Tensor& result);
1048
+
1049
+ Tensor _to_copy_backward(
1050
+ const Tensor& grad,
1051
+ const c10::TensorOptions& self_options);
1052
+
1053
+ std::tuple<Tensor, Tensor> index_reduce_backward(
1054
+ const Tensor& grad,
1055
+ const Tensor& self,
1056
+ int dim,
1057
+ const Tensor& index,
1058
+ const Tensor& source,
1059
+ c10::string_view reduce,
1060
+ bool include_self,
1061
+ const Tensor& result);
1062
+
1063
+ Tensor take_backward(
1064
+ const Tensor& grad,
1065
+ const Tensor& self,
1066
+ const Tensor& indices);
1067
+
1068
+ Tensor to_sparse_backward(
1069
+ const Tensor& grad,
1070
+ const c10::Layout self_layout,
1071
+ const c10::OptionalArrayRef<c10::SymInt>& self_blocksize);
1072
+
1073
+ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor>
1074
+ mkldnn_rnn_layer_differentiable_backward(
1075
+ const Tensor& input,
1076
+ const Tensor& weight0,
1077
+ const Tensor& weight1,
1078
+ const Tensor& weight2,
1079
+ const Tensor& weight3,
1080
+ const Tensor& hx_,
1081
+ const Tensor& cx_tmp,
1082
+ const Tensor& output,
1083
+ const Tensor& hy_,
1084
+ const Tensor& cy_,
1085
+ const c10::optional<Tensor>& grad_output_r_opt,
1086
+ const c10::optional<Tensor>& grad_hy_r_opt,
1087
+ const c10::optional<Tensor>& grad_cy_r_opt,
1088
+ bool reverse,
1089
+ int64_t mode,
1090
+ int64_t hidden_size,
1091
+ int64_t num_layers,
1092
+ bool has_biases,
1093
+ bool train,
1094
+ bool bidirectional,
1095
+ at::IntArrayRef batch_sizes,
1096
+ bool batch_first,
1097
+ const at::Tensor& workspace);
1098
+
1099
+ Tensor values_backward(const Tensor& grad, const Tensor& self);
1100
+
1101
+ } // namespace torch::autograd::generated::details
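As a hedged illustration of the IndexRangeGenerator declared near the top of this header (the generated-backward context that normally surrounds it is assumed): it hands out consecutive index ranges over a flattened list of gradient slots.

    using torch::autograd::generated::details::IndexRangeGenerator;
    IndexRangeGenerator gen;
    auto self_ix = gen.range(1);    // covers slot  [0, 1)
    auto weight_ix = gen.range(2);  // covers slots [1, 3)
    // gen.size() == 3: total number of flattened gradient slots handed out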
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+
+ #include <c10/core/InferenceMode.h>
+ #include <torch/csrc/Export.h>
+
+ namespace torch::autograd {
+
+ using InferenceMode = c10::InferenceMode;
+
+ }
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/Export.h>
4
+ #include <memory>
5
+ #include <string>
6
+
7
+ namespace torch::autograd {
8
+
9
+ // forward declaration of Node from function.h
10
+ struct Node;
11
+
12
+ struct TORCH_API AnomalyMode {
13
+ static bool is_enabled() {
14
+ return _enabled;
15
+ }
16
+ static bool should_check_nan() {
17
+ return _check_nan;
18
+ }
19
+ static void set_enabled(bool enabled, bool check_nan = true) {
20
+ _enabled = enabled;
21
+ _check_nan = check_nan;
22
+ }
23
+
24
+ private:
25
+ static bool _enabled;
26
+ static bool _check_nan;
27
+ };
28
+
29
+ /// A RAII guard that enables Anomaly Detection Mode.
30
+ ///
31
+ /// Anomaly detection mode is useful for debugging problems happening
32
+ /// in the backward, such as unexpectedly modified tensors or NaNs
33
+ /// occuring in the backward.
34
+ ///
35
+ /// The enabling of anomaly mode is global - as soon as there is one
36
+ /// such guard, it is enabled for all computation and threads. It also
37
+ /// comes with a significant performance penalty.
38
+ ///
39
+ /// Example:
40
+ /// @code
41
+ /// auto x = torch::tensor({1.}, torch::requires_grad());
42
+ /// {
43
+ /// torch::autograd::DetectAnomalyGuard detect_anomaly;
44
+ /// auto x = torch::tensor({5.0}, torch::requires_grad());
45
+ /// auto y = x * x;
46
+ /// auto z = y * y;
47
+ /// y += 1;
48
+ /// z.backward();
49
+ /// }
50
+ /// @endcode
51
+ class TORCH_API DetectAnomalyGuard {
52
+ public:
53
+ DetectAnomalyGuard(bool check_nan = true);
54
+ ~DetectAnomalyGuard();
55
+
56
+ private:
57
+ bool prev_check_nan_;
58
+ };
59
+
60
+ struct TORCH_API AnomalyMetadata {
61
+ virtual ~AnomalyMetadata();
62
+ virtual void store_stack();
63
+ virtual void print_stack(const std::string& current_node_name);
64
+ virtual void assign_parent(const std::shared_ptr<Node>& parent_node);
65
+
66
+ private:
67
+ std::string traceback_;
68
+ std::shared_ptr<Node> parent_;
69
+ };
70
+
71
+ } // namespace torch::autograd
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h ADDED
@@ -0,0 +1,104 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/variable.h>
4
+
5
+ namespace torch::autograd {
6
+
7
+ /// Computes the sum of gradients of given tensors with respect to graph leaves.
8
+ ///
9
+ /// The graph is differentiated using the chain rule. If any of ``tensors``
10
+ /// are non-scalar (i.e. their data has more than one element) and require
11
+ /// gradient, then the Jacobian-vector product would be computed; in this case
12
+ /// the function additionally requires specifying `grad_tensors`. It should be a
13
+ /// sequence of matching length that contains the "vector" in the
14
+ /// Jacobian-vector product, usually the gradient of the differentiated function
15
+ /// w.r.t. corresponding tensors
16
+ /// (`torch::Tensor()` is an acceptable value for all tensors that don't need
17
+ /// gradient tensors).
18
+ ///
19
+ /// This function accumulates gradients in the leaves - you might need to zero
20
+ /// them before calling it.
21
+ ///
22
+ /// \param tensors Tensors of which the derivative will be computed.
23
+ /// \param grad_tensors The "vector" in the Jacobian-vector product, usually
24
+ /// gradients
25
+ /// w.r.t. each element of corresponding tensors. `torch::Tensor()` values
26
+ /// can be specified for scalar Tensors or ones that don't require grad. If
27
+ /// a `torch::Tensor()` value would be acceptable for all grad_tensors, then
28
+ /// this argument is optional.
29
+ /// \param retain_graph If `false`, the graph used to compute the grad will be
30
+ /// freed.
31
+ /// Note that in nearly all cases setting this option to `true` is not
32
+ /// needed and often can be worked around in a much more efficient way.
33
+ /// Defaults to the value of `create_graph`.
34
+ /// \param create_graph If `true`, graph of the derivative will be constructed,
35
+ /// allowing
36
+ /// higher order derivative products to be computed. Defaults to `false`.
37
+ /// \param inputs Inputs w.r.t. which the gradient will be accumulated into
38
+ /// `at::Tensor::grad`. All other Tensors will be ignored. If not provided,
39
+ /// the gradient is accumulated into all the leaf Tensors that were used to
40
+ /// compute param `tensors`.
41
+ // When inputs are provided and a given input is not a leaf,
42
+ // the current implementation will call its grad_fn (even though it is not
43
+ // strictly needed to get this gradients). It is an implementation detail
44
+ // on which the user should not rely. See
45
+ // https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for
46
+ // more details.
47
+ TORCH_API void backward(
48
+ const variable_list& tensors,
49
+ const variable_list& grad_tensors = {},
50
+ c10::optional<bool> retain_graph = c10::nullopt,
51
+ bool create_graph = false,
52
+ const variable_list& inputs = {});
53
+
54
+ /// Computes and returns the sum of gradients of outputs with respect to the
55
+ /// inputs.
56
+ ///
57
+ /// ``grad_outputs`` should be a sequence of length matching ``output``
58
+ /// containing the "vector" in Jacobian-vector product, usually the pre-computed
59
+ /// gradients w.r.t. each of the outputs. If an output doesn't require_grad,
60
+ /// then the gradient can be ``torch::Tensor()``).
61
+ ///
62
+ /// \param outputs outputs of the differentiated function.
63
+ /// \param inputs Inputs w.r.t. which the gradient will be
64
+ /// returned (and not accumulated into ``at::Tensor::grad``).
65
+ /// \param grad_outputs The "vector" in the Jacobian-vector product.
66
+ /// Usually gradients w.r.t. each output. `torch::Tensor()` values can be
67
+ /// specified for scalar Tensors or ones that don't require grad. If a
68
+ /// `torch::Tensor()` value would be acceptable for all grad_tensors, then
69
+ /// this argument is optional. Default: `{}`.
70
+ /// \param retain_graph If ``false``, the graph used to compute the grad
71
+ /// will be freed. Note that in nearly all cases setting this option to
72
+ /// ``true`` is not needed and often can be worked around in a much more
73
+ /// efficient way. Defaults to the value of ``create_graph``.
74
+ /// \param create_graph If ``true``, graph of the derivative will
75
+ /// be constructed, allowing higher order derivative products to be computed.
76
+ /// Default: ``false``.
77
+ /// \param allow_unused If ``false``, specifying inputs that were not
78
+ /// used when computing outputs (and therefore their grad is always zero)
79
+ /// is an error. Defaults to ``false``.
80
+ TORCH_API variable_list grad(
81
+ const variable_list& outputs,
82
+ const variable_list& inputs,
83
+ const variable_list& grad_outputs = {},
84
+ c10::optional<bool> retain_graph = c10::nullopt,
85
+ bool create_graph = false,
86
+ bool allow_unused = false);
87
+
88
+ namespace forward_ad {
89
+
90
+ /// Creates a new dual level and returns its index. This level index should then
91
+ /// be used to call into the other functions below. This API supports entering a
92
+ /// new level before the previous one is exited. We call them nested forward AD
93
+ /// levels. These can be used to compute higher order derivatives.
94
+ TORCH_API uint64_t enter_dual_level();
95
+
96
+ /// Exits the given level. This will clear up all the gradients from this level
97
+ /// and all dual Tensors that had gradients for this level will become regular
98
+ /// Tensors again. This function can only be used to exit the innermost nesting
99
+ /// level and so exiting must happen in reverse order compared to the entering
100
+ /// that was done with the function above.
101
+ TORCH_API void exit_dual_level(uint64_t level);
102
+
103
+ } // namespace forward_ad
104
+ } // namespace torch::autograd
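A minimal hedged sketch of calling the grad() overload declared above from C++ (an initialized libtorch program is assumed; the variable names are illustrative).

    auto x = torch::tensor({2.0}, torch::requires_grad());
    auto y = x * x;
    // grad_outputs is the "vector" in the Jacobian-vector product.
    auto grads = torch::autograd::grad({y}, {x}, {torch::ones_like(y)});
    // grads[0] is expected to hold dy/dx = 2 * x = 4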
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ #include <torch/library.h>
+
+ namespace torch::autograd {
+
+ // Default DispatchKey::Autograd fallback for built-in operators.
+ // Can be registered for custom operators.
+ TORCH_API torch::CppFunction autogradNotImplementedFallback();
+
+ // Default DispatchKey::AdInplaceOrView fallback for built-in operators
+ // Can be registered for custom operators.
+ TORCH_API torch::CppFunction autogradNotImplementedInplaceOrViewFallback();
+
+ // Default DispatchKey::Autograd fallback for all other operators (i.e. custom
+ // operators)
+ TORCH_API torch::CppFunction basicAutogradNotImplementedFallback();
+
+ enum class AutogradFallbackMode {
+ Nothing, // Fallback is a redispatch
+ Warn, // Fallback raises a warning if backward is called
+ Error, // Fallback raises an error if backward is called
+ };
+
+ // Change the behavior of "basicAutogradNotImplementedFallback"
+ // In Python this is:
+ // - torch._C._set_autograd_fallback_mode(str) -> None
+ // - torch._C._get_autograd_fallback_mode() -> str
+ TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
+ TORCH_API AutogradFallbackMode getAutogradFallbackMode();
+
+ } // namespace torch::autograd
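For example (a hedged sketch, not part of the header), the fallback behavior for operators without an autograd formula can be switched from silent redispatch to a warning:

    torch::autograd::setAutogradFallbackMode(
        torch::autograd::AutogradFallbackMode::Warn);
    // getAutogradFallbackMode() would now return AutogradFallbackMode::Warn.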
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h ADDED
@@ -0,0 +1,29 @@
+ #pragma once
+ #include <torch/csrc/autograd/function_hook.h>
+ #include <functional>
+ #include <memory>
+
+ namespace torch::autograd {
+
+ using hooks_list =
+ std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
+
+ struct CppFunctionTensorPreHook : public FunctionPreHook {
+ CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, size_t value_idx);
+ variable_list operator()(const variable_list& values) override;
+
+ std::shared_ptr<hooks_list> hooks_;
+ size_t value_idx_;
+ };
+
+ struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
+ CppFunctionSingleTensorPreHook(
+ std::function<at::TensorBase(const at::TensorBase&)> hook,
+ size_t value_idx);
+ variable_list operator()(const variable_list& values) override;
+
+ std::function<at::TensorBase(const at::TensorBase&)> hook_;
+ size_t value_idx_;
+ };
+
+ } // namespace torch::autograd
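A short hedged sketch of wiring a lambda into the hooks_list declared above (attaching the resulting pre-hook to a Node is assumed and omitted).

    auto hooks = std::make_shared<torch::autograd::hooks_list>();
    // Identity hook: receives the gradient for value_idx 0 and returns it unchanged.
    hooks->push_back([](const at::TensorBase& grad) { return grad; });
    torch::autograd::CppFunctionTensorPreHook pre_hook(hooks, /*value_idx=*/0);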
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h ADDED
@@ -0,0 +1,56 @@
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+ #include <functional>
5
+ #include <memory>
6
+
7
+ #include <c10/util/hash.h>
8
+
9
+ namespace torch::autograd {
10
+
11
+ struct Node;
12
+
13
+ /// Represents a particular input of a function.
14
+ struct Edge {
15
+ Edge() noexcept : function(nullptr), input_nr(0) {}
16
+
17
+ Edge(std::shared_ptr<Node> function_, uint32_t input_nr_) noexcept
18
+ : function(std::move(function_)), input_nr(input_nr_) {}
19
+
20
+ /// Convenience method to test if an edge is valid.
21
+ bool is_valid() const noexcept {
22
+ return function != nullptr;
23
+ }
24
+
25
+ // Required for use in associative containers.
26
+ bool operator==(const Edge& other) const noexcept {
27
+ return this->function == other.function && this->input_nr == other.input_nr;
28
+ }
29
+
30
+ bool operator!=(const Edge& other) const noexcept {
31
+ return !(*this == other);
32
+ }
33
+
34
+ /// The function this `Edge` points to.
35
+ std::shared_ptr<Node> function;
36
+
37
+ /// The identifier of a particular input to the function.
38
+ uint32_t input_nr;
39
+ };
40
+ } // namespace torch::autograd
41
+
42
+ // The idiomatic way of enabling use of a custom type as the key of hash
43
+ // containers in C++11. This method removes the requirement of having to pass
44
+ // a custom hasher to std::unordered_{map, set}.
45
+ // See http://en.cppreference.com/w/cpp/utility/hash for more information.
46
+ namespace std {
47
+ template <>
48
+ struct hash<torch::autograd::Edge> {
49
+ // These type aliases are required by the standard.
50
+ using argument_type = torch::autograd::Edge;
51
+ using return_type = size_t;
52
+ return_type operator()(const argument_type& edge) const noexcept {
53
+ return c10::get_hash(edge.function, edge.input_nr);
54
+ }
55
+ };
56
+ } // namespace std
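As a hedged illustration of why the std::hash specialization above exists (the fn node pointer is assumed to be produced elsewhere): Edge can key standard hash containers directly.

    // Requires <unordered_set> in addition to this header.
    std::shared_ptr<torch::autograd::Node> fn;  // assumed to be obtained elsewhere
    std::unordered_set<torch::autograd::Edge> seen;
    seen.emplace(fn, /*input_nr=*/0);  // no custom hasher needed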
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h ADDED
@@ -0,0 +1,288 @@
1
+ #pragma once
2
+
3
+ // Engine implements backpropagation from output variables and their gradients
4
+ // to "root" variables (variables created by the user with requires_grad=True).
5
+
6
+ #include <ATen/Tensor.h>
7
+ #include <ATen/ThreadLocalState.h>
8
+ #include <ATen/core/ivalue.h>
9
+ #include <torch/csrc/Export.h>
10
+ #include <torch/csrc/autograd/anomaly_mode.h>
11
+ #include <torch/csrc/autograd/function.h>
12
+ #include <torch/csrc/autograd/functions/basic_ops.h>
13
+ #include <torch/csrc/autograd/graph_task.h>
14
+ #include <torch/csrc/autograd/input_buffer.h>
15
+ #include <torch/csrc/autograd/saved_variable_hooks.h>
16
+ #include <torch/csrc/autograd/utils/warnings.h>
17
+
18
+ #include <c10/util/CallOnce.h>
19
+
20
+ #include <exception>
21
+ #include <functional>
22
+ #include <memory>
23
+ #include <queue>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ namespace torch::autograd {
28
+ struct ReadyQueue;
29
+ }
30
+
31
+ namespace torch::autograd {
32
+
33
+ // Maximum reentrant backward depth before switching to a new thread
34
+ // This limit is based on TSAN's deadlock detector, which will fail
35
+ // if a program holds more than 65 locks in one thread at once.
36
+ // As we hold a mutex in every one of our custom C++ autograd Nodes, we would
37
+ // like to avoid TSAN complaints about this when doing reentrant backwards.
38
+ // For reference, see https://github.com/google/sanitizers/issues/950
39
+ static constexpr int MAX_DEPTH = 60;
40
+
41
+ void set_device(int device);
42
+ TORCH_API void validate_outputs(
43
+ const edge_list& edges,
44
+ variable_list& grads,
45
+ const std::function<std::string(const std::string&)>& format_error);
46
+
47
+ struct NodeTask {
48
+ std::weak_ptr<GraphTask> base_;
49
+ std::shared_ptr<Node> fn_;
50
+ // This buffer serves as an implicit "addition" node for all of the
51
+ // gradients flowing here. Once all the dependencies are finished, we
52
+ // use the contents of this buffer to run the function.
53
+ InputBuffer inputs_;
54
+ // When a worker receives a task with isShutdownTask = true, it will immediately
55
+ // exit. The engine sends a shutdown task to every queue upon its destruction.
56
+ bool isShutdownTask_;
57
+
58
+ int getReentrantDepth() const;
59
+
60
+ NodeTask(
61
+ std::weak_ptr<GraphTask> base,
62
+ std::shared_ptr<Node> fn,
63
+ InputBuffer inputs,
64
+ bool isShutdownTask = false)
65
+ : base_(std::move(base)),
66
+ fn_(std::move(fn)),
67
+ inputs_(std::move(inputs)),
68
+ isShutdownTask_(isShutdownTask) {}
69
+ };
70
+
71
+ // Guard that sets and restores checkpoint_valid
72
+ class CheckpointValidGuard {
73
+ public:
74
+ explicit CheckpointValidGuard(
75
+ const std::shared_ptr<const GraphTask>& graph_task);
76
+ ~CheckpointValidGuard();
77
+
78
+ private:
79
+ bool prev_checkpoint_valid_state;
80
+ };
81
+
82
+ struct ReadyQueue {
83
+ private:
84
+ // Returns true when t2 should be (weakly) BEFORE t1 in the queue.
85
+ // Shutdown tasks come first, and then empty NodeTasks are next.
86
+ struct CompareNodeTaskTime {
87
+ bool operator()(NodeTask const& t1, NodeTask const& t2) {
88
+ // NOLINTNEXTLINE(bugprone-branch-clone)
89
+ if (t2.isShutdownTask_) {
90
+ return true;
91
+ } else if (!t1.fn_ || t1.isShutdownTask_) {
92
+ return false;
93
+ } else if (!t2.fn_) {
94
+ return true;
95
+ } else if (t1.getReentrantDepth() == t2.getReentrantDepth()) {
96
+ return t1.fn_->sequence_nr() < t2.fn_->sequence_nr();
97
+ } else {
98
+ return t1.getReentrantDepth() < t2.getReentrantDepth();
99
+ }
100
+ }
101
+ };
102
+
103
+ // To notify threads waiting on the ReadyQueue of available tasks on the heap_
104
+ std::condition_variable not_empty_;
105
+ // To protect read and writes to heap_
106
+ mutable std::mutex mutex_;
107
+
108
+ std::priority_queue<NodeTask, std::vector<NodeTask>, CompareNodeTaskTime>
109
+ heap_;
110
+
111
+ public:
112
+ // incrementOutstandingTasks indicates whether or not we should increment
113
+ // 'outstanding_tasks_' for the associated GraphTask. This should mostly
114
+ // always be true and is only set false in certain cases (see docs for
115
+ // DistEngine.execute_graph_task_until_ready_queue_empty)
116
+ void push(NodeTask item, bool incrementOutstandingTasks = true);
117
+ void pushShutdownTask();
118
+ NodeTask pop();
119
+ bool empty() const;
120
+ size_t size() const;
121
+ };
122
+
123
+ // A single instance of this struct should be created through the whole process
124
+ // lifetime. The worker thread creation logic and Engine's destructor rely on
125
+ // this.
126
+ struct TORCH_API Engine {
127
+ /// Returns a reference to a static `Engine` instance.
128
+ static Engine& get_default_engine();
129
+
130
+ static Engine& get_base_engine();
131
+
132
+ // compiled_autograd needs to live in a different .so file so that it
133
+ // can have python symbols, so we add a layer of indirection
134
+ // see [Note: Compiled Autograd]
135
+ typedef variable_list (*compiled_autograd_fn)(
136
+ const std::shared_ptr<Node>& graph_root,
137
+ GraphTask& graph_task,
138
+ bool accumulate_grad,
139
+ const edge_list& outputs);
140
+ static void set_compiled_autograd(compiled_autograd_fn fn);
141
+
142
+ Engine(const Engine&) = delete;
143
+ Engine(Engine&&) = delete;
144
+ virtual ~Engine();
145
+
146
+ // Given a list of (Node, input number) pairs computes the value of the graph
147
+ // by following next_edge references.
148
+ virtual variable_list execute(
149
+ const edge_list& roots,
150
+ const variable_list& inputs,
151
+ bool keep_graph,
152
+ bool create_graph,
153
+ bool accumulate_grad,
154
+ const edge_list& outputs = {});
155
+
156
+ // Given a pre-populated GraphTask and GraphRoot, computes the backward pass
157
+ // for the graph.
158
+ //
159
+ // NB: This API should only be used by internal autograd specific
160
+ // machinery and shouldn't be exposed to users in anyway.
161
+ virtual c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
162
+ const std::shared_ptr<GraphTask>& graph_task,
163
+ std::shared_ptr<Node> graph_root,
164
+ InputBuffer&& input_buffer);
165
+
166
+ virtual std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() {
167
+ return std::make_unique<AnomalyMetadata>();
168
+ }
169
+
170
+ virtual std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks() {
171
+ return nullptr;
172
+ }
173
+
174
+ // We pass cpu_ready_queue to evaluate_function, so that it knows
175
+ // the correct ready queue to push to after a NodeTask is ready
176
+ void evaluate_function(
177
+ std::shared_ptr<GraphTask>& graph_task,
178
+ Node* func,
179
+ InputBuffer& inputs,
180
+ const std::shared_ptr<ReadyQueue>& cpu_ready_queue);
181
+
182
+ void initialize_device_threads_pool();
183
+ virtual void thread_on_exception(
184
+ std::shared_ptr<GraphTask> graph_task,
185
+ const std::shared_ptr<Node>& fn,
186
+ std::exception& e);
187
+
188
+ void queue_callback(std::function<void()> callback);
189
+
190
+ bool is_checkpoint_valid();
191
+
192
+ // Should be called after fork to notify that worker threads are gone
193
+ void release_workers();
194
+
195
+ // Must be called by subclass before destructing to avoid a data-race-on-vptr.
196
+ void stop();
197
+
198
+ // Initializes a device thread for the autograd engine.
199
+ virtual void thread_init(
200
+ int device,
201
+ const std::shared_ptr<ReadyQueue>& ready_queue,
202
+ bool should_increment = true);
203
+
204
+ protected:
205
+ Engine();
206
+ void compute_dependencies(Node* root, GraphTask& task, uint64_t min_topo_nr);
207
+
208
+ // initialize the thread local ready queue with the ready queue that is
209
+ // created elsewhere (i.e. thread_init, Engine::execute, etc), or create a new
210
+ // ready queue if ready_queue is not provided.
211
+ void init_local_ready_queue(
212
+ std::shared_ptr<ReadyQueue> ready_queue = nullptr);
213
+
214
+ std::shared_ptr<ReadyQueue> ready_queue(
215
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
216
+ at::Device device);
217
+ std::shared_ptr<ReadyQueue> ready_queue_by_index(
218
+ std::shared_ptr<ReadyQueue> cpu_ready_queue,
219
+ int device_index);
220
+ // start device threads (CUDA, XLA, etc.) in Engine,
221
+ // note that it does NOT start CPU thread.
222
+ void start_device_threads();
223
+ void increment_non_reentrant_thread_count();
224
+ void decrement_non_reentrant_thread_count();
225
+ virtual void thread_main(const std::shared_ptr<GraphTask>& task);
226
+ void reentrant_thread_init();
227
+ void add_thread_pool_task(const std::weak_ptr<GraphTask>& graph_task);
228
+
229
+ // Ensures device_ready_queues_ are initialized only once
230
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
231
+ c10::once_flag start_device_threads_flag_;
232
+ // Safe to read device_ready_queues_ without synchronization after
233
+ // initialization
234
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
235
+ std::vector<std::shared_ptr<ReadyQueue>> device_ready_queues_;
236
+
237
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
238
+ std::vector<std::function<void()>> final_callbacks_;
239
+ // To protect reads and writes to final_callbacks_
240
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
241
+ std::mutex post_callbacks_lock_;
242
+
243
+ // How many nested reentrant calls are allowed until a new thread is used
244
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
245
+ int max_recursion_depth_;
246
+
247
+ struct ThreadPoolShared {
248
+ // Data structures used by the threads for executing reentrant backwards
249
+ // tasks. See Note [Reentrant backwards]
250
+ // Number of available threads for processing new GraphTasks.
251
+ unsigned int num_workers_{0};
252
+ // The threads will wait on work_ to be notified of GraphTasks
253
+ std::condition_variable work_;
254
+ // To protect reads and writes to graphtask_queue_ and num_workers_
255
+ // and for synchronizing creating new threads when needed
256
+ std::mutex mutex_;
257
+ // Workers will process the GraphTasks added to this queue. A GraphTask is
258
+ // allocated inside Engine::execute and lives for the duration of execute
259
+ std::queue<std::weak_ptr<GraphTask>> graphtasks_queue_;
260
+
261
+ ThreadPoolShared() = default;
262
+ };
263
+
264
+ // Temporary workaround until shutting down threads is done
265
+ // We need shared ownership of all these objects because the threads are
266
+ // leaked when Engine shuts down, so there may be threads waiting on work_ for
267
+ // the graphtasks_queue_ to be nonempty.
268
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
269
+ std::shared_ptr<ThreadPoolShared> thread_pool_shared_;
270
+
271
+ private:
272
+ // Number of non-reentrant threads
273
+ std::atomic<uint32_t> non_reentrant_device_thread_count_;
274
+ // Destructor will wait for non-reentrant threads to finish
275
+ std::condition_variable non_reentrant_device_thread_condvar_;
276
+ std::mutex non_reentrant_device_thread_mutex_;
277
+ // stop() must be called before the destruction path goes down to the base
278
+ // class, in order to avoid a data-race-on-vptr. Use this boolean to guard
279
+ // whether stop() has already been called, so we can call this in every
280
+ // destructor of the class hierarchy.
281
+ bool stopped_{false};
282
+ };
283
+
284
+ // allow python_engine to override the default engine when it loads
285
+ using EngineStub = Engine& (*)();
286
+ TORCH_API void set_default_engine_stub(EngineStub stub);
287
+
288
+ } // namespace torch::autograd
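A minimal standalone sketch of how a CompareNodeTaskTime-style comparator orders work in a std::priority_queue: shutdown tasks first, then deeper reentrant calls, then later-created nodes (higher sequence_nr). MiniTask and CompareMiniTask are illustrative stand-ins; the real comparator also special-cases tasks with an empty fn_.

```cpp
// Standalone sketch (not part of the diff above): priority ordering of
// NodeTask-like work items in a max-heap priority queue.
#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

struct MiniTask {
  bool is_shutdown;
  int reentrant_depth;
  uint64_t sequence_nr;
};

struct CompareMiniTask {
  // Returns true when t2 should be popped before t1 (std::priority_queue
  // pops the "largest" element under this strict weak ordering).
  bool operator()(const MiniTask& t1, const MiniTask& t2) const {
    if (t2.is_shutdown) return true;
    if (t1.is_shutdown) return false;
    if (t1.reentrant_depth == t2.reentrant_depth)
      return t1.sequence_nr < t2.sequence_nr;   // later ops run first
    return t1.reentrant_depth < t2.reentrant_depth;  // deeper reentrant calls first
  }
};

int main() {
  std::priority_queue<MiniTask, std::vector<MiniTask>, CompareMiniTask> heap;
  heap.push({false, 0, 7});
  heap.push({false, 0, 42});
  heap.push({true, 0, 0});  // shutdown task jumps to the front
  while (!heap.empty()) {
    const auto& t = heap.top();
    std::cout << "shutdown=" << t.is_shutdown << " seq=" << t.sequence_nr << "\n";
    heap.pop();
  }
  // Expected order: shutdown=1 seq=0, shutdown=0 seq=42, shutdown=0 seq=7.
}
```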
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h ADDED
@@ -0,0 +1,210 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <unordered_set>
5
+
6
+ namespace torch::autograd {
7
+
8
+ // [ Using ForwardGrad ]
9
+ // ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
10
+ // design. But this shared_ptr must be uniquely associated with the object that
11
+ // stores it (as of writing, either AutogradMeta or SavedVariable). This object
12
+ // is called the "owning object" in the discussions below. This owning object
13
+ // must call `ForwardGrad::clear()` when it is destroyed to ensure that the
14
+ // ForwardGrad is properly de-allocated.
15
+
16
+ struct ForwardGrad;
17
+
18
+ // This file contains two classes that are used to store forward AD gradients
19
+ // and ensure that they are scoped properly. Because forward AD runs
20
+ // concurrently with the evaluation of the function, we need a mechanism to
21
+ // separate different forward AD invocations and be able to compute the right
22
+ // gradients. We model such invocations as levels here. The particular scoping
23
+ // issue mentioned above has two main drivers:
24
+ // - Ensure that we can conveniently use forward AD within a high level API
25
+ // without
26
+ // leaking the forward AD states outside.
27
+ // - Ensure that we can keep the level that we expose to the user API simple
28
+ // (an integer
29
+ // that represents the nesting depth) while avoiding confusions when the
30
+ // level index is re-used.
31
+
32
+ // The important external APIs from this file are:
33
+ // - ForwardADLevel::get_next_idx() that can be used to enter a new level and
34
+ // get its index
35
+ // - ForwardADLevel::release_idx() that can be used to exit a given level.
36
+ // - ForwardGrad() can be used to store a given forward gradient that will
37
+ // handle the level
38
+ // tracking automatically.
39
+
40
+ // The basic implementation strategy is as follows:
41
+ // Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
42
+ // ForwardGrad is responsible for registering itself to the appropriate
43
+ // ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value
44
+ // and to un-register itself from this same level if that tangent is removed via
45
+ // ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
46
+ // via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
47
+ // stored into a global (for the whole process) vector that ensures it can be
48
+ // accessed via ForwardADLevel::get_by_idx. This reference is deleted when the
49
+ // index is released by the user when calling ForwardADLevel::release_idx. When
50
+ // it is destructed, the ForwardADLevel is responsible for clearing all the
51
+ // tangents for its level stored in all the ForwardGrad that registered with it.
52
+ //
53
+ // This process-wide level design, compared to a thread local one, allows us to
54
+ // use very simple user facing handle for the level (an int) while enabling
55
+ // cross-thread forward AD. The only required synchronization for the user is
56
+ // when entering and exiting the levels. Some discussion on alternative design
57
+ // is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
58
+ // can be refined in the future.
59
+
60
+ // Correctness of concurrency:
61
+ // Each class uses its own lock when reading or modifying internal storages.
62
+ // This allows in particular to safely remove tangents from ForwardGrad when the
63
+ // ForwardADLevel is being exited. We ensure no deadlock by ensuring that a
64
+ // methods never calls into another class's method while the local class's lock
65
+ // is held except in one single case: calling from ForwardADLevel's destructor
66
+ // into ForwardGrad::reset with update_level=false.
67
+
68
+ // The lifetime of these objects is as follows:
69
+ // The ForwardADLevel can be in three states:
70
+ // - Initialized: where one of its reference is held by the global vector
71
+ // and there may be more
72
+ // references held by temporary variables in ForwardGrad's methods.
73
+ // - About to be destructed: where "release_idx" has been called and the
74
+ // only reason for the
75
+ // ForwardADLevel not to be destructed right away is that some methods in
76
+ // ForwardGrad have owning reference to it. This is done so that a
77
+ // ForwardADLevel can never be destructed when a ForwardGrad is
78
+ // registered with it and in the process of adding something to its
79
+ // internal state.
80
+ // - Being destructed: Here the ForwardADLevel is not referenced anymore
81
+ // and can safely reset
82
+ // all of the ForwardGrads. Note that we can have more than one reset
83
+ // being called here (which is ok) but we are guaranteed that there is at
84
+ // least one.
85
+ // The ForwardGrad is simpler as there is no intermediary state and no special
86
+ // destructor for it. The logic to unregister it from the different ForwardADLevels
87
+ // is done when the owning object (AutogradMeta or SavedVariable) is being
88
+ // destroyed.
89
+
90
+ // Other considered design:
91
+ // To avoid having the ForwardGrad::clear, we considered storing weak_ptr inside
92
+ // the ForwardADLevel. While this would work, it would mean that the set inside
93
+ // the ForwardADLevel would only grow unless we do an expensive linear scan to
94
+ // remove all the dangling weak pointers. Hence this approach was not used.
95
+
96
+ // Data structures in this file are optimized for this maximum number of levels.
97
+ // The number of levels corresponds to the degree of the gradient being
98
+ // computed using forward AD and we don't expect more than second order
99
+ // gradients to be common.
100
+ #define EXPECTED_MAX_LEVEL 2
101
+
102
+ struct TORCH_API ForwardADLevel {
103
+ ForwardADLevel(uint64_t idx) : idx_(idx) {}
104
+ ~ForwardADLevel();
105
+
106
+ static uint64_t get_next_idx();
107
+ static void release_idx(uint64_t idx);
108
+ static std::shared_ptr<ForwardADLevel> get_by_idx(uint64_t idx);
109
+ static std::shared_ptr<ForwardADLevel> try_get_by_idx(uint64_t idx);
110
+
111
+ void erase(const std::shared_ptr<ForwardGrad>& grad) {
112
+ std::lock_guard<std::mutex> lock(mutex_);
113
+ grads_.erase(grad);
114
+ }
115
+
116
+ void insert(const std::shared_ptr<ForwardGrad>& grad) {
117
+ std::lock_guard<std::mutex> lock(mutex_);
118
+ grads_.insert(grad);
119
+ }
120
+
121
+ private:
122
+ std::unordered_set<std::shared_ptr<ForwardGrad>> grads_;
123
+ std::mutex mutex_;
124
+ uint64_t idx_;
125
+ };
126
+
127
+ struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
128
+ ForwardGrad() = default;
129
+
130
+ // This function must only be called when AutogradMeta or SavedVariable is
131
+ // being destructed as it ensures that:
132
+ // - The only (potential) other references to this ForwardGrad are the
133
+ // different level it is registered to
134
+ // - No other thread will try to call `set_value` or `value` ever from now
135
+ // on
136
+ // - Any of the ForwardADLevel that this ForwardGrad is registered with
137
+ // might
138
+ // call `reset` at any point during this function
139
+ void clear() {
140
+ c10::SmallVector<uint64_t, EXPECTED_MAX_LEVEL> levels_idx;
141
+
142
+ {
143
+ std::lock_guard<std::mutex> lock(mutex_);
144
+ for (auto& c : content_) {
145
+ levels_idx.push_back(c.first);
146
+ }
147
+ }
148
+
149
+ for (auto l_idx : levels_idx) {
150
+ // Use "try" version here as another thread might have deleted this
151
+ // level before we got here
152
+ // This is an owning reference as we want to keep the level alive
153
+ // until we successfully unregister ourselves
154
+ auto level = ForwardADLevel::try_get_by_idx(l_idx);
155
+ if (level) {
156
+ level->erase(shared_from_this());
157
+ }
158
+ }
159
+ }
160
+
161
+ void set_value(const at::Tensor& value, uint64_t level) {
162
+ // Owning reference to ensure the forward_level is not destroyed
163
+ // while we are updating our internal state
164
+ auto forward_level = ForwardADLevel::get_by_idx(level);
165
+ forward_level->insert(shared_from_this());
166
+
167
+ std::lock_guard<std::mutex> lock(mutex_);
168
+ content_.insert({level, value});
169
+ }
170
+
171
+ // This function removes the tangent for a given level from this ForwardGrad
172
+ // Use the update_level flag to disable notifying the level about this reset
173
+ // This flag is most notably used by the ForwardADLevel destructor.
174
+ void reset(uint64_t level, bool update_level = true) {
175
+ if (update_level) {
176
+ ForwardADLevel::get_by_idx(level)->erase(shared_from_this());
177
+ }
178
+
179
+ std::unique_lock<std::mutex> lock(mutex_);
180
+ const auto& it = content_.find(level);
181
+ TORCH_INTERNAL_ASSERT(
182
+ it != content_.end(), "Resetting a non-existent level.");
183
+ // Keep the Tensor alive until we have released the lock
184
+ // This is needed as we can be in a case where this function is called by
185
+ // ForwardADLevel destructor
186
+ auto t = (*it).second;
187
+ content_.erase(level);
188
+ lock.unlock();
189
+ }
190
+
191
+ const at::Tensor& value(uint64_t level) const;
192
+
193
+ bool contains(uint64_t level) {
194
+ std::lock_guard<std::mutex> lock(mutex_);
195
+ return content_.count(level) > 0;
196
+ }
197
+
198
+ bool empty() const {
199
+ return content_.empty();
200
+ }
201
+
202
+ static const at::Tensor& undef_grad();
203
+
204
+ private:
205
+ // TODO(albanD): replace this with a SmallVector
206
+ std::unordered_map<uint64_t, at::Tensor> content_;
207
+ mutable std::mutex mutex_;
208
+ };
209
+
210
+ } // namespace torch::autograd
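A minimal standalone, single-threaded sketch of the level-registration pattern described in the comments above. enter_level/exit_level, MiniLevel, MiniGrad, and the use of double tangents are illustrative assumptions; the real classes add locking, shared ownership, and the try_get_by_idx escape hatch.

```cpp
// Standalone sketch (not part of the diff above): a tangent container
// registers itself with a process-wide level, and releasing the level clears
// that level's tangent from every registered container.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <unordered_set>
#include <vector>

struct MiniGrad;

struct MiniLevel {
  std::unordered_set<std::shared_ptr<MiniGrad>> grads;
};

// Global (process-wide) vector of live levels; the handle given to the user
// is just an index into it, as with ForwardADLevel::get_next_idx.
std::vector<std::shared_ptr<MiniLevel>> g_levels;

struct MiniGrad : std::enable_shared_from_this<MiniGrad> {
  std::map<uint64_t, double> content;  // level -> tangent

  void set_value(double tangent, uint64_t level) {
    g_levels[level]->grads.insert(shared_from_this());  // register with the level
    content[level] = tangent;
  }
};

uint64_t enter_level() {
  g_levels.push_back(std::make_shared<MiniLevel>());
  return g_levels.size() - 1;
}

void exit_level(uint64_t idx) {
  // Clearing a level wipes its tangent from every registered MiniGrad,
  // mirroring what the ForwardADLevel destructor does via ForwardGrad::reset.
  for (const auto& grad : g_levels[idx]->grads) grad->content.erase(idx);
  g_levels[idx].reset();
}

int main() {
  auto grad = std::make_shared<MiniGrad>();
  uint64_t lvl = enter_level();
  grad->set_value(1.5, lvl);
  std::cout << grad->content.count(lvl) << "\n";  // 1: tangent present
  exit_level(lvl);
  std::cout << grad->content.count(lvl) << "\n";  // 0: cleared with the level
}
```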
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h ADDED
@@ -0,0 +1,763 @@
1
+ #pragma once
2
+
3
+ #include <torch/csrc/autograd/anomaly_mode.h>
4
+ #include <torch/csrc/autograd/edge.h>
5
+ #include <torch/csrc/autograd/grad_mode.h>
6
+ #include <torch/csrc/autograd/graph_task.h>
7
+ #include <torch/csrc/autograd/input_metadata.h>
8
+ #include <torch/csrc/autograd/saved_variable.h>
9
+ #include <torch/csrc/autograd/variable.h>
10
+ #include <torch/csrc/utils/python_stub.h>
11
+ #include <torch/csrc/utils/variadic.h>
12
+
13
+ #include <ATen/SequenceNumber.h>
14
+ #include <ATen/core/Tensor.h>
15
+ #include <ATen/record_function.h>
16
+ #include <c10/util/Exception.h>
17
+ #include <c10/util/irange.h>
18
+
19
+ #include <algorithm>
20
+ #include <cstdint>
21
+ #include <initializer_list>
22
+ #include <memory>
23
+ #include <string>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ namespace torch::autograd {
28
+
29
+ struct Edge;
30
+ struct FunctionPostHook;
31
+ struct FunctionPreHook;
32
+
33
+ using tensor_list = std::vector<at::Tensor>;
34
+ using variable_list = std::vector<Variable>;
35
+ using edge_list = std::vector<Edge>;
36
+ using saved_variable_list = std::vector<SavedVariable>;
37
+ using IndexRange = std::pair<size_t, size_t>;
38
+ using torch::dynamo::autograd::CompiledNodeArgs;
39
+ using torch::dynamo::autograd::SwapSavedVariables;
40
+
41
+ // Custom deleter to prevent stack overflows.
42
+ TORCH_API void deleteNode(Node* function);
43
+
44
+ // Guard that sets and restores the evaluating node
45
+ class NodeGuard {
46
+ public:
47
+ explicit NodeGuard(std::shared_ptr<Node> node);
48
+ ~NodeGuard();
49
+
50
+ private:
51
+ std::shared_ptr<Node> last_evaluating_node_;
52
+ };
53
+
54
+ // Return the Node currently being evaluated (if any)
55
+ // This is only set during the backward pass while a Node is being
56
+ // executed.
57
+ TORCH_API std::shared_ptr<Node> get_current_node();
58
+
59
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60
+ // Node
61
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62
+ // A `Node` is an abstract class that represents an operation taking zero
63
+ // or more input `Variable`s and producing zero or more output `Variable`s. All
64
+ // functions in PyTorch's autograd machinery derive from this class and
65
+ // override its `apply` method. Instances of such subclasses will then be
66
+ // invokable via the call operator.
67
+ //
68
+ // Nodes in the Autograd Graph
69
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
70
+ // When viewing the autograd system as a graph, `Node`s are the vertices or
71
+ // nodes, connected to each other via (directed) `Edge`s, which themselves are
72
+ // represented via (`Node`, input_nr) pairs. `Variable`s are the outputs to
73
+ // and inputs of `Node`s, and travel between these edges during execution
74
+ // of the graph. When two or more `Edge`s (from different sources) point at the
75
+ // same input to a `Node`, the values produced along all of these edges are
76
+ // implicitly summed prior to being forwarded to the target `Node`.
77
+ //
78
+ // Hierarchy
79
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
80
+ // Subclasses usually represent differentiable functions as well as their
81
+ // gradient operators. Note, however, that due to the very general definition
82
+ // of a `Node` taking *zero* or more inputs and producing *zero* or more
83
+ // outputs, uses of `Node`s are flexible and extend beyond purely
84
+ // mathematical operations. For example, the `AccumulateGrad` function is a
85
+ // *sink*: it takes one input, but produces no outputs, instead accumulating
86
+ // the input as a side effect. At the other extreme, the `GraphRoot` function
87
+ // receives no inputs from other functions, but produces multiple outputs.
88
+ //
89
+ // Interface
90
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
91
+ // The most important method on `Node` is the call operator, which takes in
92
+ // a list of variables and produces a list of variables. The precise size of
93
+ // these lists can be determined with `num_inputs()` and `num_outputs()`.
94
+ // `Node`s are stitched together via their `next_edge` interface, which lets
95
+ // you manipulate the set of outgoing edges of a `Node`. You can add an
96
+ // edge with `add_next_edge()`, retrieve an edge with `next_edge(index)` and
97
+ // iterate over them via the `next_edges()` method. Other methods exist for
98
+ // integration with the JIT and other parts of PyTorch. Every `Node` has a
99
+ // *sequence number* that increases monotonically in the order of `Node`
100
+ // construction. It can be retrieved via the `sequence_nr()` method. Note that
101
+ // this sequence number is *thread local*. This means that when `Node`s
102
+ // `A`, `B` and `C` are created consecutively in the same thread, their
103
+ // sequence numbers will be ordered `A` < `B` < `C`. If, however, `A` and `B`
104
+ // are created in one thread and `C` is created in a new thread, there are *no
105
+ // guarantees* w.r.t. the ordering of `C` relative to `A` or `B`.
106
+ // See NOTE [ Sequence Number] for more details on the usages of sequence
107
+ // number.
108
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
109
+ struct TORCH_API Node : std::enable_shared_from_this<Node> {
110
+ public:
111
+ /// Construct a new `Node` with the given `next_edges`
112
+ explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list())
113
+ : sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) {
114
+ for (const Edge& edge : next_edges_) {
115
+ update_topological_nr(edge);
116
+ }
117
+
118
+ if (AnomalyMode::is_enabled()) {
119
+ metadata()->store_stack();
120
+
121
+ // If anomaly mode is enabled and graph is constructed, then assign the
122
+ // currently evaluating node as the parent of this node.
123
+ // A parent is a Node where this Node is created.
124
+ // We are tracking the parents to track multiple backward operations.
125
+ assign_parent();
126
+ }
127
+
128
+ // Store the thread_id of the forward operator.
129
+ // See NOTE [ Sequence Numbers ]
130
+ thread_id_ = at::RecordFunction::currentThreadId();
131
+ }
132
+
133
+ explicit Node(edge_list&& next_edges = edge_list())
134
+ : Node(
135
+ /*sequence_nr=*/at::sequence_number::get_and_increment(),
136
+ std::move(next_edges)) {}
137
+
138
+ /// Nodes are neither copyable nor moveable.
139
+ Node(const Node& other) = delete;
140
+ Node(Node&& other) = delete;
141
+ Node& operator=(const Node& other) = delete;
142
+ Node& operator=(Node&& other) = delete;
143
+ virtual ~Node() = default;
144
+
145
+ std::shared_ptr<Node> getptr() {
146
+ return shared_from_this();
147
+ }
148
+ /// Evaluates the function on the given inputs and returns the result of the
149
+ /// function call.
150
+ variable_list operator()(variable_list&& inputs) {
151
+ // In the first iteration of named tensors, autograd ignores names and
152
+ // operates on unnamed tensors. In the long term, autograd should
153
+ // probably operate with names.
154
+ at::NoNamesGuard no_names_guard;
155
+
156
+ #ifdef USE_ROCM
157
+ // Keep track of backward pass for rocblas.
158
+ at::ROCmBackwardPassGuard in_backward;
159
+ #endif
160
+
161
+ auto step_callbacks =
162
+ at::getStepCallbacksUnlessEmpty(at::RecordScope::BACKWARD_FUNCTION);
163
+ if (C10_UNLIKELY(step_callbacks.has_value())) {
164
+ at::RecordFunction guard(std::move(*step_callbacks));
165
+ // Using sequence number and thread id to correlate with
166
+ // the forward pass function
167
+ guard.setForwardThreadId(thread_id_);
168
+ if (guard.needsInputs()) {
169
+ std::vector<c10::IValue> inputs_vec(inputs.begin(), inputs.end());
170
+ guard.before(
171
+ name(),
172
+ c10::ArrayRef<const c10::IValue>(
173
+ inputs_vec.data(), inputs_vec.size()),
174
+ static_cast<int64_t>(sequence_nr()));
175
+ } else {
176
+ guard.before(name(), static_cast<int64_t>(sequence_nr()));
177
+ }
178
+ return apply(std::move(inputs));
179
+ } else {
180
+ return apply(std::move(inputs));
181
+ }
182
+ }
183
+
184
+ // Graph Connectivity API
185
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
186
+
187
+ // Inputs. NOTE: inputs of the grad_fn correspond to Tensor outputs of the
188
+ // forward function.
189
+
190
+ // Marker for expected undefined input
191
+ struct undefined_input {};
192
+
193
+ /// Adds the type and shape metadata for a new input. Returns the index of
194
+ /// the new input.
195
+ uint32_t add_input_metadata(
196
+ const at::TensorOptions& options,
197
+ c10::SymIntArrayRef shape,
198
+ bool is_tensor_subclass,
199
+ bool is_nested) noexcept {
200
+ uint32_t input_nr = input_metadata_.size();
201
+ auto meta_shape = MetadataShape{std::in_place_type<SymIntSmallVec>, shape};
202
+ input_metadata_.emplace_back(
203
+ options, meta_shape, is_tensor_subclass, is_nested);
204
+ return input_nr;
205
+ }
206
+
207
+ uint32_t add_input_metadata(const at::Tensor& t) noexcept {
208
+ uint32_t input_nr = input_metadata_.size();
209
+ input_metadata_.emplace_back(t);
210
+ return input_nr;
211
+ }
212
+
213
+ /// Adds a placeholder for an input that will not be used.
214
+ uint32_t add_input_metadata(undefined_input u) noexcept {
215
+ uint32_t input_nr = input_metadata_.size();
216
+ input_metadata_.emplace_back();
217
+ return input_nr;
218
+ }
219
+
220
+ uint32_t num_inputs() const noexcept {
221
+ return input_metadata_.size();
222
+ }
223
+
224
+ const InputMetadata& input_metadata(size_t index) const {
225
+ return input_metadata_[index];
226
+ }
227
+
228
+ // Danger: not thread safe, caller must protect with lock
229
+ InputMetadata& mutable_input_metadata(size_t index) {
230
+ return input_metadata_[index];
231
+ }
232
+
233
+ /**
234
+ * Note: Function Streams
235
+ * A function's stream (for a given device type) is the stream of the first
236
+ * element of its input buffer on a device of that type.
237
+ *
238
+ * If all elements are on the same device they MUST share a stream. If
239
+ * elements are on different devices (across multiple GPUs, for example)
240
+ * they may have different streams.
241
+ */
242
+ c10::optional<c10::Stream> stream() {
243
+ auto opt_device_type = at::getAccelerator();
244
+ if (!opt_device_type.has_value()) {
245
+ return c10::nullopt;
246
+ }
247
+ for (const auto& metadata : input_metadata_) {
248
+ if (metadata.device().type() == opt_device_type.value())
249
+ return metadata.stream();
250
+ }
251
+
252
+ return c10::nullopt;
253
+ }
254
+
255
+ void clear_input_metadata() {
256
+ input_metadata_.clear();
257
+ }
258
+
259
+ // Outputs ("Next Edges")
260
+
261
+ void update_topological_nr(const Edge& edge) {
262
+ TORCH_INTERNAL_ASSERT(
263
+ !has_parent_,
264
+ "Cannot update a node's topological_nr after it already has a parent."
265
+ " If we allow this, we can no longer guarantee that a parent's"
266
+ " topo_nr is always greater than those of all its children")
267
+ Node* node = edge.function.get();
268
+ if (node) {
269
+ auto topo_nr = node->topological_nr();
270
+ if (topological_nr_ <= topo_nr) {
271
+ topological_nr_ = topo_nr + 1;
272
+ }
273
+ }
274
+ }
275
+
276
+ void set_next_edge(size_t index, Edge edge) {
277
+ update_topological_nr(edge);
278
+ next_edges_[index] = std::move(edge);
279
+ }
280
+
281
+ void add_next_edge(Edge edge) {
282
+ update_topological_nr(edge);
283
+ next_edges_.emplace_back(std::move(edge));
284
+ }
285
+
286
+ void set_next_edges(edge_list&& next_edges) {
287
+ next_edges_ = std::move(next_edges);
288
+ for (const auto& next_edge : next_edges_) {
289
+ update_topological_nr(next_edge);
290
+ }
291
+ }
292
+
293
+ const Edge& next_edge(size_t index) const noexcept {
294
+ return next_edges_[index];
295
+ }
296
+
297
+ const edge_list& next_edges() const noexcept {
298
+ return next_edges_;
299
+ }
300
+
301
+ edge_list& next_edges() noexcept {
302
+ return next_edges_;
303
+ }
304
+
305
+ uint32_t num_outputs() const noexcept {
306
+ return next_edges_.size();
307
+ }
308
+
309
+ // Miscellaneous Methods
310
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
311
+
312
+ /// NOTE [ Sequence Number]
313
+ ///
314
+ /// The sequence_nr has two main usages in autograd:
315
+ ///
316
+ /// 1) Helps determine the node's execution priority in the engine.
317
+ /// All else being equal, nodes with higher priority numbers are executed
318
+ /// first. Thus, nodes corresponding to ops executed later are the first to
319
+ /// be executed in the backward pass. One caveat is that we prioritize
320
+ /// AccumulateGrad nodes by explicitly setting its sequence_nr to be
321
+ /// UINT64_MAX.
322
+ /// 2) The sequence number of this `Node` is paired with the thread_id it was
323
+ /// created in
324
+ /// as a unique identifier by the profiler to annotate recorded events.
325
+ /// The purpose of this is to help users (and possibly programs)
326
+ /// interpreting the profiler's output to correlate backward nodes with its
327
+ /// forward ops. We need both sequence_nr and thread_id to identify a node
328
+ /// because sequence_nr is thread_local, i.e., starts counting up from zero
329
+ /// in a new thread
330
+ uint64_t sequence_nr() const noexcept {
331
+ return sequence_nr_;
332
+ }
333
+
334
+ void set_sequence_nr(uint64_t sequence_nr) {
335
+ sequence_nr_ = sequence_nr;
336
+ }
337
+
338
+ // NOTE [ Topological Number ]
339
+ //
340
+ // topological_nr is used to prune branches in the DAG during autograd
341
+ // discovery as maintaining topological_nr helps us check in O(1) if there
342
+ // does NOT exist a directed path between two nodes.
343
+ //
344
+ // The topological order number of this `Node` represents the length of the
345
+ // longest possible path from this Node to any leaf node. If you are a leaf
346
+ // node, aka AccumulateGrad, this will be zero. This value has the property
347
+ // that for every pair of nodes X, Y in G, existence of a directed path from X
348
+ // to Y implies topo_nr(X) > topo_nr(Y). The converse is not true, however, so
349
+ // we cannot prove existence of a path from X to Y, only non-existence.
350
+ //
351
+ // One assumption we make when using topo_nr is that once a node
352
+ // has been used, i.e., has a parent node, its own topo_nr does not change;
353
+ // we have added some checks with the `has_parent_` field to enforce this.
354
+ //
355
+ // What NOT to do:
356
+ //
357
+ // 1) 2 -> 1 -> 0 In this diagram we label nodes with their
358
+ // topo_nr.
359
+ // 2 -> 1 -> 0 We have two simple graphs that can each
360
+ // arise from
361
+ // `t.exp().exp()`, for example.
362
+ // 2) 2 -> 1 -> 0
363
+ // /
364
+ // 2 -> 1 -> 0 We add 2 as a next edge to 1 even though 1
365
+ // already
366
+ // has a parent.
367
+ // 3) 2 -> 1 -> 0
368
+ // /
369
+ // 2 -> 3 -> 0 2 < 3, yet there exists a path from 2 to 3!
370
+ //
371
+ uint64_t topological_nr() const noexcept {
372
+ has_parent_ = true;
373
+ return topological_nr_;
374
+ }
375
+
376
+ // assigning a node as a parent to this node
377
+ void assign_parent();
378
+
379
+ /// Id of the thread that created Node
380
+ uint64_t thread_id() const noexcept {
381
+ return thread_id_;
382
+ }
383
+
384
+ /// Returns the name of the dynamic type of the function, for debugging.
385
+ virtual std::string name() const;
386
+
387
+ /// The difference between functions `should_compute_output` and
388
+ /// `task_should_compute_output`:
389
+ /// - `should_compute_output` should only be used during graph construction
390
+ /// and takes into account only requires_grad information
391
+ /// - `task_should_compute_output` should only be called during the backward
392
+ /// pass (unless called directly through grad_fn) and takes into account the
393
+ /// current graph task. Specifically, the autograd engine trims unnecessary
394
+ /// edges when `inputs` are specified, and during backward untrimmed nodes
395
+ /// left on the graph can/should check `task_should_compute_output` to see if
396
+ /// any outgoing edges have been trimmed by the engine. If that is the case,
397
+ /// gradient computation wrt those edges can be omitted.
398
+ ///
399
+ /// Returns true if the particular output edge is active, and that particular
400
+ /// output of this function should be computed.
401
+ bool should_compute_output(size_t output_edge_index) const {
402
+ TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
403
+ return next_edges_[output_edge_index].is_valid();
404
+ }
405
+
406
+ /// Returns true if any of the output edges in any of the ranges are active.
407
+ bool should_compute_output(std::initializer_list<IndexRange> idxs) const {
408
+ return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
409
+ for (const auto i : c10::irange(range.first, range.second)) {
410
+ if (should_compute_output(i))
411
+ return true;
412
+ }
413
+ return false;
414
+ });
415
+ }
416
+
417
+ /// Same as the above `should_compute_output` function but will also
418
+ /// check whether this edge is needed within the current graph task.
419
+ bool task_should_compute_output(size_t output_edge_index) const {
420
+ TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
421
+ const auto& next = next_edges_[output_edge_index];
422
+ if (next.is_valid()) {
423
+ const auto exec_info = get_current_graph_task_exec_info();
424
+ if (exec_info && !exec_info->empty()) {
425
+ auto it = exec_info->find(next.function.get());
426
+ if (it == exec_info->end() || !it->second.should_execute()) {
427
+ return false; // this edge is not needed for the current graph_task
428
+ }
429
+ }
430
+ return true;
431
+ }
432
+ return false;
433
+ }
434
+
435
+ /// Returns true if any of the output edges in any of the ranges are active
436
+ /// and should be computed in the current graph task.
437
+ bool task_should_compute_output(
438
+ std::initializer_list<IndexRange> idxs) const {
439
+ return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
440
+ for (const auto i : c10::irange(range.first, range.second)) {
441
+ if (task_should_compute_output(i))
442
+ return true;
443
+ }
444
+ return false;
445
+ });
446
+ }
447
+
448
+ /// Returns the `PyObject` stored for this `Node` (for Python
449
+ /// interaction).
450
+ PyObject* pyobj() const noexcept {
451
+ return pyobj_;
452
+ }
453
+
454
+ /// Sets the `PyObject` stored for this `Node` (for Python interaction).
455
+ void set_pyobj(PyObject* pyobj) noexcept {
456
+ pyobj_ = pyobj;
457
+ }
458
+
459
+ /// Returns the anomaly metadata stored for this `Node`.
460
+ /// If none exist, creates a new empty one.
461
+ AnomalyMetadata* metadata() noexcept;
462
+
463
+ // Hook API
464
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
465
+
466
+ uintptr_t add_post_hook(std::unique_ptr<FunctionPostHook>&& post_hook) {
467
+ post_hooks_.emplace_back(std::move(post_hook));
468
+ // Use the raw pointer as the unique key to identify this hook. This key
469
+ // can then be used in del_post_hook(key) to remove this hook.
470
+ return reinterpret_cast<std::uintptr_t>(post_hooks_.back().get());
471
+ }
472
+
473
+ const std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks()
474
+ const noexcept {
475
+ return post_hooks_;
476
+ }
477
+
478
+ // delete a post hook matching the key
479
+ bool del_post_hook(const uintptr_t& key) {
480
+ for (auto it = post_hooks_.begin(); it != post_hooks_.end(); ++it) {
481
+ if (key == reinterpret_cast<std::uintptr_t>(it->get())) {
482
+ post_hooks_.erase(it);
483
+ return true;
484
+ }
485
+ }
486
+ return false;
487
+ }
488
+
489
+ std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks() noexcept {
490
+ return post_hooks_;
491
+ }
492
+
493
+ void add_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
494
+ pre_hooks_.emplace_back(std::move(pre_hook));
495
+ }
496
+
497
+ void add_tensor_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
498
+ tensor_pre_hooks_.emplace_back(std::move(pre_hook));
499
+ }
500
+
501
+ void add_retains_grad_hook(
502
+ std::unique_ptr<FunctionPreHook>&& pre_hook,
503
+ size_t output_idx) {
504
+ retains_grad_hooks_[output_idx] = std::move(pre_hook);
505
+ }
506
+
507
+ std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(size_t output_idx) {
508
+ auto ret = std::move(retains_grad_hooks_[output_idx]);
509
+ retains_grad_hooks_.erase(output_idx);
510
+ return ret;
511
+ }
512
+
513
+ const std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks()
514
+ const noexcept {
515
+ return pre_hooks_;
516
+ }
517
+
518
+ std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks() noexcept {
519
+ return pre_hooks_;
520
+ }
521
+
522
+ virtual std::vector<std::unique_ptr<FunctionPreHook>>&
523
+ tensor_pre_hooks() noexcept {
524
+ return tensor_pre_hooks_;
525
+ }
526
+
527
+ virtual std::unique_ptr<PostAccumulateGradHook>&
528
+ tensor_post_acc_grad_hooks() noexcept {
529
+ static std::unique_ptr<PostAccumulateGradHook> empty = nullptr;
530
+ return empty;
531
+ }
532
+
533
+ std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>&
534
+ retains_grad_hooks() noexcept {
535
+ return retains_grad_hooks_;
536
+ }
537
+
538
+ // Customization Points for Subclasses
539
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
540
+
541
+ /// Releases saved variables if the operation won't be reused.
542
+ virtual void release_variables() {}
543
+
544
+ /// Called before an apply if `release_variables()` is going to be called.
545
+ /// Allows larger ops like `InterpreterAutogradFunction` to incrementally
546
+ /// release variables as they run.
547
+ virtual void will_release_variables() {}
548
+
549
+ /// Returns true if this function is traceable. An op is traceable if all
550
+ /// operations happening within `apply()` are performed on autograd
551
+ /// `Variables` (i.e. apply mostly instantiates and applies other functions).
552
+ virtual bool is_traceable() {
553
+ return false;
554
+ }
555
+
556
+ /// A `Node` is said to pass state transparently to backward, if the
557
+ /// state consists only of (Saved)Variables and only non-variable objects
558
+ /// that parameterize the operation in some way that defines the graph
559
+ /// structure AND the backward function is traceable. In particular,
560
+ /// parametrization MUST NOT depend on the data of any `Variable`.
561
+ /// TODO: it might be possible to handle cases where backward is
562
+ /// non-traceable but state passing could be considered transparent. This
563
+ /// will probably depend on saved_variable_list being mutable.
564
+ /// NOTE: this value matters only if is_traceable() returns false.
565
+ virtual bool passes_state_transparently() {
566
+ return false;
567
+ }
568
+
569
+ // see [Note: Compiled Autograd]
570
+ // Used by compiled autograd to
571
+ // 1) Extract tensors/symint args
572
+ // 2) Collect node information for specialization and caching
573
+ // Implementations in subclasses should call args.collect() with all node
574
+ // attrs. These functions are only called during backward.
575
+ virtual void compiled_args(CompiledNodeArgs& args) {
576
+ throw std::runtime_error(
577
+ std::string("compiled_args not implemented: ") + name());
578
+ }
579
+
580
+ // Used by compiled autograd to call apply() with different saved tensors
581
+ // Implementations should call saved.before() on all attrs, then apply(), then
582
+ // saved.after() on all attrs in the same order.
583
+ virtual variable_list apply_with_saved(
584
+ const variable_list& inputs,
585
+ SwapSavedVariables& saved) {
586
+ throw std::runtime_error(
587
+ std::string("apply_with_saved not implemented: ") + name());
588
+ }
589
+
590
+ protected:
591
+ /// Performs the `Node`'s actual operation.
592
+ virtual variable_list apply(variable_list&& inputs) = 0;
593
+
594
+ /// Calls `apply()`, but instruments it with tracing machinery.
595
+ variable_list traced_apply(variable_list inputs);
596
+
597
+ // Sequence number used to correlate backward nodes with forward ops in the
598
+ // profiler and provide determinism in the engine.
599
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
600
+ uint64_t sequence_nr_;
601
+
602
+ // See NOTE [ Topological Number ]
603
+ uint64_t topological_nr_ = 0;
604
+
605
+ // Tracks whether this node has been added as the next_edge of another node
606
+ // via set_next_edge(s), which always calls topological_nr() of all its
607
+ // children See NOTE [ Topological Number ] for why we need this.
608
+ mutable bool has_parent_ = false;
609
+
610
+ // Id of the thread that created the instance
611
+ uint64_t thread_id_ = 0;
612
+
613
+ // Note [Thread Safety on Autograd Node]
614
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
615
+ // The Autograd Engine lets the owning thread which calls Engine::execute drive
616
+ // the GraphTask execution; there might be cases where part of the GraphTask is
617
+ // shared across different `backward()` or `grad()` calls, i.e. fork new
618
+ // threads in the middle of the forward and call `backward()` separately from
619
+ // different threads. We need to protect the thread safety on NodeTask to
620
+ // prevent data racing on shared variables read/write.
621
+ //
622
+ // NB: This is only needed for Autograd Nodes that run on CPU; technically
623
+ // "CUDA", "XLA" nodes don't need locking because device threads are always
624
+ // single threaded.
625
+ //
626
+ // Here we add a thread mutex to help protect the Node's thread safety, so
627
+ // that different threads cannot race the shared data when executing the same
628
+ // NodeTask from multiple CPU threads. It IS the user/developer responsibility
629
+ // to take advantage of this mutex to protect the thread safety of their
630
+ // autograd Node. The general strategy of thread safety on autograd Node:
631
+ //
632
+ // 1. User should lock the mutex during Node::release_variables() if the Node
633
+ // needs
634
+ // to release the variables on the fly, this serve the purpose that when we
635
+ // release saved_variables from one thread, no other threads can release
636
+ // the saved variables concurrently. call the Node::apply(),
637
+ // 2. User should lock the mutex during Node::apply(), this is to ensure Node
638
+ // that
639
+ // writing to the shared variable are not racing across threads (i.e.
640
+ // AccumulateGrad and custom C++ Autograd Node if writing to shared
641
+ // variables )
642
+ // 3. item 2 and item 3 should work together so that when we release saved
643
+ // variables
644
+ // from one thread, no other threads can call Node::apply(), this ensures
645
+ // the variable references from other threads aren't dangling.
646
+ // 4. if the Node doesn't release any variables and has no shared data read/write
647
+ // in the Node,
648
+ // i.e. it is purely functional, the user doesn't need to lock the mutex
649
+ //
650
+ // This way we can protect the thread safety on Autograd Node, but we still
651
+ // cannot protect the thread safety on Node pre/post C++ hooks (python
652
+ // hooks are automatically thread safe), we rely on the user to write thread
653
+ // safe C++ hooks if they want the hook to be correctly applied in
654
+ // a multithreading environment.
655
+ std::mutex mutex_;
656
+
657
+ edge_list next_edges_;
658
+ PyObject* pyobj_ = nullptr; // weak reference
659
+ std::unique_ptr<AnomalyMetadata> anomaly_metadata_ = nullptr;
660
+
661
+ // NOTE [Hooks ordering]
662
+ // We have 3 separate fields for pre hooks registered to the autograd nodes
663
+ // because the conditions under which they execute are different, and we
664
+ // want more fine-grained control over the order in which different types
665
+ // of hooks are executed.
666
+ // - pre_hooks are only executed when the node itself is executed
667
+ // - tensor_pre_hook is executed as long as the engine traverses over it
668
+ // even if that node won't be executed.
669
+ // - retains_grad_hook are like tensor_pre_hooks except they are always
670
+ // ordered after all other tensor pre hooks
671
+ std::vector<std::unique_ptr<FunctionPreHook>> pre_hooks_;
672
+ std::vector<std::unique_ptr<FunctionPreHook>> tensor_pre_hooks_;
673
+ std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>
674
+ retains_grad_hooks_;
675
+ std::vector<std::unique_ptr<FunctionPostHook>> post_hooks_;
676
+ at::SmallVector<InputMetadata, 2> input_metadata_;
677
+ };
678
+
679
+ /// See Node::is_traceable() for definition.
680
+ struct TraceableFunction : public Node {
681
+ using Node::Node;
682
+ bool is_traceable() final {
683
+ return true;
684
+ }
685
+ };
686
+
687
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
688
+ // Associated Free Nodes
689
+ //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
690
+
691
+ namespace detail {
692
+ // Implementation of `collect_next_edges` (see below).
693
+ struct MakeNextFunctionList : IterArgs<MakeNextFunctionList> {
694
+ edge_list next_edges;
695
+ using IterArgs<MakeNextFunctionList>::operator();
696
+ void operator()(const Variable& variable) {
697
+ if (variable.defined()) {
698
+ next_edges.emplace_back(impl::gradient_edge(variable));
699
+ } else {
700
+ next_edges.emplace_back();
701
+ }
702
+ }
703
+ void operator()(const Variable* variable) {
704
+ operator()(*variable);
705
+ }
706
+ void operator()(const c10::optional<Variable>& variable) {
707
+ if (variable.has_value()) {
708
+ operator()(*variable);
709
+ } else {
710
+ next_edges.emplace_back();
711
+ }
712
+ }
713
+ };
714
+ } // namespace detail
715
+
716
+ /// Create an `Edge` between the given `variable` and the `function`, which is
717
+ /// assumed to be the gradient function of this variable (i.e. the function
718
+ /// through which this variable is backpropagated during the backward pass).
719
+ /// This sets the `grad_fn` property of the `variable`. This function assumes
720
+ /// that the `Variable` is a new input to the gradient function and its
721
+ /// `input_nr` thus equal to `function->num_inputs()`. Additionally, it
722
+ /// increments the `Node`'s number of inputs by one. Approximately
723
+ /// equivalent to `variable.set_gradient_edge(function,
724
+ /// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`.
725
+ /// If you don't want the `Node`'s `num_inputs` to be incremented, use
726
+ /// `set_gradient_edge` directly.
727
+ inline void create_gradient_edge(
728
+ Variable& variable,
729
+ std::shared_ptr<Node> function) {
730
+ // Copy before move.
731
+ const auto input_nr = function->add_input_metadata(variable);
732
+ impl::set_gradient_edge(variable, {std::move(function), input_nr});
733
+ }
734
+
735
+ /// Return true if any of the variables in the list require a gradient.
736
+ inline bool any_variable_requires_grad(const variable_list& variables) {
737
+ return std::any_of(
738
+ variables.begin(), variables.end(), [](const Variable& variable) {
739
+ return variable.defined() && variable.requires_grad();
740
+ });
741
+ }
742
+
743
+ /// Return the next edges of all the given variables, or tuples of variables.
744
+ template <typename... Variables>
745
+ edge_list collect_next_edges(Variables&&... variables) {
746
+ detail::MakeNextFunctionList make;
747
+ make.apply(std::forward<Variables>(variables)...);
748
+ return std::move(make.next_edges);
749
+ }
750
+
751
+ struct TypeAndSize {
752
+ TypeAndSize() : options(at::TensorOptions()) {}
753
+ /* implicit */
754
+ TypeAndSize(const at::Tensor& t)
755
+ : sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
756
+
757
+ at::Tensor zeros();
758
+
759
+ std::vector<c10::SymInt> sym_sizes;
760
+ at::TensorOptions options;
761
+ };
762
+
763
+ } // namespace torch::autograd
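A minimal standalone sketch of the invariant from NOTE [ Topological Number ]: maintaining topo_nr as one more than the largest child's value lets you rule out a directed path in O(1). MiniNode and definitely_no_path are illustrative stand-ins; only the update rule from Node::update_topological_nr is reproduced.

```cpp
// Standalone sketch (not part of the diff above): topo_nr(X) <= topo_nr(Y)
// proves there is no directed path from X to Y; the converse does not hold.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct MiniNode {
  uint64_t topological_nr = 0;  // leaves (AccumulateGrad) stay at 0
  std::vector<std::shared_ptr<MiniNode>> next_edges;

  void add_next_edge(std::shared_ptr<MiniNode> child) {
    // Same rule as Node::update_topological_nr: topo_nr >= child topo_nr + 1.
    topological_nr = std::max(topological_nr, child->topological_nr + 1);
    next_edges.push_back(std::move(child));
  }
};

// O(1) pruning check used during autograd graph discovery.
bool definitely_no_path(const MiniNode& x, const MiniNode& y) {
  return x.topological_nr <= y.topological_nr;
}

int main() {
  auto leaf = std::make_shared<MiniNode>();  // topo_nr 0
  auto mid = std::make_shared<MiniNode>();
  mid->add_next_edge(leaf);                  // topo_nr 1
  auto root = std::make_shared<MiniNode>();
  root->add_next_edge(mid);                  // topo_nr 2

  std::cout << definitely_no_path(*leaf, *root) << "\n";  // 1: no path leaf -> root
  std::cout << definitely_no_path(*root, *leaf) << "\n";  // 0: a path may exist
}
```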
venv/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h ADDED
@@ -0,0 +1,64 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <torch/csrc/Export.h>
5
+ #include <string>
6
+ #include <vector>
7
+
8
+ namespace torch::dynamo::autograd {
9
+ class CompiledNodeArgs;
10
+ class SwapSavedVariables;
11
+ } // namespace torch::dynamo::autograd
12
+
13
+ // A hook that's called on gradients
14
+
15
+ namespace torch::autograd {
16
+
17
+ using Variable = at::Tensor;
18
+ using variable_list = std::vector<Variable>;
19
+
20
+ struct TORCH_API FunctionPreHook {
21
+ virtual ~FunctionPreHook() = default;
22
+ virtual variable_list operator()(const variable_list& grads) = 0;
23
+ // only implemented for python hooks, registers hook with compiled autograd
24
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
25
+ throw std::runtime_error(
26
+ std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
27
+ typeid(*this).name());
28
+ }
29
+ };
30
+
31
+ struct TORCH_API FunctionPostHook {
32
+ virtual ~FunctionPostHook() = default;
33
+ virtual variable_list operator()(
34
+ const variable_list& outputs /* grad_inputs */,
35
+ const variable_list& inputs /* grad_outputs */) = 0;
36
+ // only implemented for python hooks, registers hook with compiled autograd
37
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
38
+ throw std::runtime_error(
39
+ std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
40
+ typeid(*this).name());
41
+ }
42
+ };
43
+
44
+ struct TORCH_API PostAccumulateGradHook {
45
+ virtual ~PostAccumulateGradHook() = default;
46
+ virtual void operator()(const Variable& tensor) = 0;
47
+ // only implemented for python hooks on nodes, registers hook with compiled
48
+ // autograd
49
+ virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
50
+ throw std::runtime_error(
51
+ std::string("not yet implemented for compiled autograd: ") +
52
+ typeid(*this).name());
53
+ }
54
+
55
+ virtual void apply_with_saved(
56
+ Variable&,
57
+ torch::dynamo::autograd::SwapSavedVariables&) {
58
+ throw std::runtime_error(
59
+ std::string("not yet implemented for compiled autograd: ") +
60
+ typeid(*this).name());
61
+ }
62
+ };
63
+
64
+ } // namespace torch::autograd
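A minimal standalone sketch of what a concrete pre-hook in the shape of FunctionPreHook looks like when applied in order over a gradient list. GradPreHook, ScaleGradHook, and the double stand-in for at::Tensor are illustrative assumptions; they only mirror the interface declared above.

```cpp
// Standalone sketch (not part of the diff above): a FunctionPreHook-style
// gradient hook that rewrites the incoming gradients before a node runs.
#include <iostream>
#include <memory>
#include <vector>

using Variable = double;  // stand-in for at::Tensor
using variable_list = std::vector<Variable>;

struct GradPreHook {
  virtual ~GradPreHook() = default;
  // Takes the incoming gradients and returns the (possibly modified) gradients.
  virtual variable_list operator()(const variable_list& grads) = 0;
};

struct ScaleGradHook : GradPreHook {
  explicit ScaleGradHook(double factor) : factor_(factor) {}
  variable_list operator()(const variable_list& grads) override {
    variable_list out;
    out.reserve(grads.size());
    for (const auto& g : grads) out.push_back(g * factor_);
    return out;
  }
  double factor_;
};

int main() {
  std::vector<std::unique_ptr<GradPreHook>> pre_hooks;  // as in Node::pre_hooks_
  pre_hooks.emplace_back(std::make_unique<ScaleGradHook>(0.5));

  variable_list grads{2.0, 4.0};
  for (auto& hook : pre_hooks) grads = (*hook)(grads);  // hooks applied in order
  std::cout << grads[0] << " " << grads[1] << "\n";     // prints "1 2"
}
```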